1 #!/usr/bin/python -O
2 # Copyright 1999-2006 Gentoo Foundation
3 # Distributed under the terms of the GNU General Public License v2
4 # $Id$
5
6 import array
7 import codecs
8 from collections import deque
9 import fcntl
10 import formatter
11 import logging
12 import pwd
13 import select
14 import shlex
15 import shutil
16 import signal
17 import sys
18 import textwrap
19 import urlparse
20 import weakref
21 import gc
22 import os, stat
23 import platform
24
25 try:
26         import portage
27 except ImportError:
28         from os import path as osp
29         sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
30         import portage
31
32 from portage import digraph
33 from portage.const import NEWS_LIB_PATH
34
35 import _emerge.help
36 import portage.xpak, commands, errno, re, socket, time
37 from portage.output import blue, bold, colorize, darkblue, darkgreen, darkred, green, \
38         nc_len, red, teal, turquoise, xtermTitle, \
39         xtermTitleReset, yellow
40 from portage.output import create_color_func
41 good = create_color_func("GOOD")
42 bad = create_color_func("BAD")
43 # white looks bad on terminals with white background
44 from portage.output import bold as white
45
46 import portage.elog
47 import portage.dep
48 portage.dep._dep_check_strict = True
49 import portage.util
50 import portage.locks
51 import portage.exception
52 from portage.data import secpass
53 from portage.elog.messages import eerror
54 from portage.util import normalize_path as normpath
55 from portage.util import cmp_sort_key, writemsg, writemsg_level
56 from portage.sets import load_default_config, SETPREFIX
57 from portage.sets.base import InternalPackageSet
58
59 from itertools import chain, izip
60
61 try:
62         import cPickle as pickle
63 except ImportError:
64         import pickle
65
66 try:
67         from cStringIO import StringIO
68 except ImportError:
69         from StringIO import StringIO
70
71 class stdout_spinner(object):
72         scroll_msgs = [
73                 "Gentoo Rocks ("+platform.system()+")",
74                 "Thank you for using Gentoo. :)",
75                 "Are you actually trying to read this?",
76                 "How many times have you stared at this?",
77                 "We are generating the cache right now",
78                 "You are paying too much attention.",
79                 "A theory is better than its explanation.",
80                 "Phasers locked on target, Captain.",
81                 "Thrashing is just virtual crashing.",
82                 "To be is to program.",
83                 "Real Users hate Real Programmers.",
84                 "When all else fails, read the instructions.",
85                 "Functionality breeds Contempt.",
86                 "The future lies ahead.",
87                 "3.1415926535897932384626433832795028841971694",
88                 "Sometimes insanity is the only alternative.",
89                 "Inaccuracy saves a world of explanation.",
90         ]
91
92         twirl_sequence = "/-\\|/-\\|/-\\|/-\\|\\-/|\\-/|\\-/|\\-/|"
93
94         def __init__(self):
95                 self.spinpos = 0
96                 self.update = self.update_twirl
97                 self.scroll_sequence = self.scroll_msgs[
98                         int(time.time() * 100) % len(self.scroll_msgs)]
99                 self.last_update = 0
100                 self.min_display_latency = 0.05
101
102         def _return_early(self):
103                 """
104         Flushing output to the tty too frequently wastes cpu time. Therefore,
105                 each update* method should return without doing any output when this
106                 method returns True.
107                 """
108                 cur_time = time.time()
109                 if cur_time - self.last_update < self.min_display_latency:
110                         return True
111                 self.last_update = cur_time
112                 return False
113
114         def update_basic(self):
115                 self.spinpos = (self.spinpos + 1) % 500
116                 if self._return_early():
117                         return
118                 if (self.spinpos % 100) == 0:
119                         if self.spinpos == 0:
120                                 sys.stdout.write(". ")
121                         else:
122                                 sys.stdout.write(".")
123                 sys.stdout.flush()
124
125         def update_scroll(self):
126                 if self._return_early():
127                         return
128                 if(self.spinpos >= len(self.scroll_sequence)):
129                         sys.stdout.write(darkgreen(" \b\b\b" + self.scroll_sequence[
130                                 len(self.scroll_sequence) - 1 - (self.spinpos % len(self.scroll_sequence))]))
131                 else:
132                         sys.stdout.write(green("\b " + self.scroll_sequence[self.spinpos]))
133                 sys.stdout.flush()
134                 self.spinpos = (self.spinpos + 1) % (2 * len(self.scroll_sequence))
135
136         def update_twirl(self):
137                 self.spinpos = (self.spinpos + 1) % len(self.twirl_sequence)
138                 if self._return_early():
139                         return
140                 sys.stdout.write("\b\b " + self.twirl_sequence[self.spinpos])
141                 sys.stdout.flush()
142
143         def update_quiet(self):
144                 return
145
146 def userquery(prompt, responses=None, colours=None):
147         """Displays a prompt and a set of responses, then waits for user input.
148         The input is checked against the responses and the first match is
149         returned.  An empty response will match the first value in responses.  The
150         input buffer is *not* cleared prior to the prompt!
151
152         prompt: a String.
153         responses: a List of Strings.
154         colours: a List of Functions taking and returning a String, used to
155         process the responses for display. Typically these will be functions
156         like red() but could be e.g. lambda x: "DisplayString".
157         If responses is omitted, defaults to ["Yes", "No"], [green, red].
158         If only colours is omitted, defaults to [bold, ...].
159
160         Returns a member of the List responses. (If called without optional
161         arguments, returns "Yes" or "No".)
162         KeyboardInterrupt is converted to SystemExit to avoid tracebacks being
163         printed."""
164         if responses is None:
165                 responses = ["Yes", "No"]
166                 colours = [
167                         create_color_func("PROMPT_CHOICE_DEFAULT"),
168                         create_color_func("PROMPT_CHOICE_OTHER")
169                 ]
170         elif colours is None:
171                 colours=[bold]
172         colours=(colours*len(responses))[:len(responses)]
173         print bold(prompt),
174         try:
175                 while True:
176                         response=raw_input("["+"/".join([colours[i](responses[i]) for i in range(len(responses))])+"] ")
177                         for key in responses:
178                                 # An empty response will match the first value in responses.
179                                 if response.upper()==key[:len(response)].upper():
180                                         return key
181                         print "Sorry, response '%s' not understood." % response,
182         except (EOFError, KeyboardInterrupt):
183                 print "Interrupted."
184                 sys.exit(1)
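
# Illustrative use of userquery() (a sketch, not part of the original
# source; assumes an interactive terminal):
#
#     >>> choice = userquery("Continue?", responses=["Yes", "No", "Always"])
#     >>> if choice == "No":
#     ...     sys.exit(1)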
185
186 actions = frozenset([
187 "clean", "config", "depclean",
188 "info", "list-sets", "metadata",
189 "prune", "regen",  "search",
190 "sync",  "unmerge", "version",
191 ])
192 options=[
193 "--ask",          "--alphabetical",
194 "--buildpkg",     "--buildpkgonly",
195 "--changelog",    "--columns",
196 "--complete-graph",
197 "--debug",        "--deep",
198 "--digest",
199 "--emptytree",
200 "--fetchonly",    "--fetch-all-uri",
201 "--getbinpkg",    "--getbinpkgonly",
202 "--help",         "--ignore-default-opts",
203 "--keep-going",
204 "--noconfmem",
205 "--newuse",       "--nocolor",
206 "--nodeps",       "--noreplace",
207 "--nospinner",    "--oneshot",
208 "--onlydeps",     "--pretend",
209 "--quiet",        "--resume",
210 "--searchdesc",   "--selective",
211 "--skipfirst",
212 "--tree",
213 "--update",
214 "--usepkg",       "--usepkgonly",
215 "--verbose",
216 ]
217
218 shortmapping={
219 "1":"--oneshot",
220 "a":"--ask",
221 "b":"--buildpkg",  "B":"--buildpkgonly",
222 "c":"--clean",     "C":"--unmerge",
223 "d":"--debug",     "D":"--deep",
224 "e":"--emptytree",
225 "f":"--fetchonly", "F":"--fetch-all-uri",
226 "g":"--getbinpkg", "G":"--getbinpkgonly",
227 "h":"--help",
228 "k":"--usepkg",    "K":"--usepkgonly",
229 "l":"--changelog",
230 "n":"--noreplace", "N":"--newuse",
231 "o":"--onlydeps",  "O":"--nodeps",
232 "p":"--pretend",   "P":"--prune",
233 "q":"--quiet",
234 "s":"--search",    "S":"--searchdesc",
235 "t":"--tree",
236 "u":"--update",
237 "v":"--verbose",   "V":"--version"
238 }
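
# For reference (illustrative, not part of the original source): the table
# above is what maps the short flags in a command line such as
# `emerge -avuDN world` onto
# `emerge --ask --verbose --update --deep --newuse world`.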
239
240 def emergelog(xterm_titles, mystr, short_msg=None):
241         if xterm_titles and short_msg:
242                 if "HOSTNAME" in os.environ:
243                         short_msg = os.environ["HOSTNAME"]+": "+short_msg
244                 xtermTitle(short_msg)
245         try:
246                 file_path = "/var/log/emerge.log"
247                 mylogfile = open(file_path, "a")
248                 portage.util.apply_secpass_permissions(file_path,
249                         uid=portage.portage_uid, gid=portage.portage_gid,
250                         mode=0660)
251                 mylock = None
252                 try:
253                         mylock = portage.locks.lockfile(mylogfile)
254                         # seek because we may have gotten held up by the lock.
255                         # if so, we may not be positioned at the end of the file.
256                         mylogfile.seek(0, 2)
257                         mylogfile.write(str(time.time())[:10]+": "+mystr+"\n")
258                         mylogfile.flush()
259                 finally:
260                         if mylock:
261                                 portage.locks.unlockfile(mylock)
262                         mylogfile.close()
263         except (IOError,OSError,portage.exception.PortageException), e:
264                 if secpass >= 1:
265                         print >> sys.stderr, "emergelog():",e
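
# Illustrative call (hypothetical message strings, not from the original
# source); short_msg is only used for the xterm title:
#
#     >>> emergelog(xterm_titles, ">>> emerge (1 of 3) sys-apps/portage to /",
#     ...     short_msg="emerge: (1 of 3) sys-apps/portage")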
266
267 def countdown(secs=5, doing="Starting"):
268         if secs:
269                 print ">>> Waiting",secs,"seconds before starting..."
270                 print ">>> (Control-C to abort)...\n"+doing+" in: ",
271                 ticks=range(secs)
272                 ticks.reverse()
273                 for sec in ticks:
274                         sys.stdout.write(colorize("UNMERGE_WARN", str(sec+1)+" "))
275                         sys.stdout.flush()
276                         time.sleep(1)
277                 print
278
279 # formats a size given in bytes nicely
280 def format_size(mysize):
281         if isinstance(mysize, basestring):
282                 return mysize
283         if 0 != mysize % 1024:
284                 # Always round up to the next kB so that it doesn't show 0 kB when
285                 # some small file still needs to be fetched.
286                 mysize += 1024 - mysize % 1024
287         mystr=str(mysize/1024)
288         mycount=len(mystr)
289         while (mycount > 3):
290                 mycount-=3
291                 mystr=mystr[:mycount]+","+mystr[mycount:]
292         return mystr+" kB"
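
# Quick sanity checks for format_size() (illustrative, not from the
# original source):
#
#     >>> format_size(2048)
#     '2 kB'
#     >>> format_size(1)          # a partial kB is rounded up
#     '1 kB'
#     >>> format_size(1536000)
#     '1,500 kB'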
293
294
295 def getgccversion(chost):
296         """
297         rtype: C{str}
298         return:  the current in-use gcc version
299         """
300
301         gcc_ver_command = 'gcc -dumpversion'
302         gcc_ver_prefix = 'gcc-'
303
304         gcc_not_found_error = red(
305         "!!! No gcc found. You probably need to 'source /etc/profile'\n" +
306         "!!! to update the environment of this terminal and possibly\n" +
307         "!!! other terminals also.\n"
308         )
309
310         mystatus, myoutput = commands.getstatusoutput("gcc-config -c")
311         if mystatus == os.EX_OK and myoutput.startswith(chost + "-"):
312                 return myoutput.replace(chost + "-", gcc_ver_prefix, 1)
313
314         mystatus, myoutput = commands.getstatusoutput(
315                 chost + "-" + gcc_ver_command)
316         if mystatus == os.EX_OK:
317                 return gcc_ver_prefix + myoutput
318
319         mystatus, myoutput = commands.getstatusoutput(gcc_ver_command)
320         if mystatus == os.EX_OK:
321                 return gcc_ver_prefix + myoutput
322
323         portage.writemsg(gcc_not_found_error, noiselevel=-1)
324         return "[unavailable]"
325
326 def getportageversion(portdir, target_root, profile, chost, vardb):
327         profilever = "unavailable"
328         if profile:
329                 realpath = os.path.realpath(profile)
330                 basepath   = os.path.realpath(os.path.join(portdir, "profiles"))
331                 if realpath.startswith(basepath):
332                         profilever = realpath[1 + len(basepath):]
333                 else:
334                         try:
335                                 profilever = "!" + os.readlink(profile)
336                         except (OSError):
337                                 pass
338                 del realpath, basepath
339
340         libcver=[]
341         libclist  = vardb.match("virtual/libc")
342         libclist += vardb.match("virtual/glibc")
343         libclist  = portage.util.unique_array(libclist)
344         for x in libclist:
345                 xs=portage.catpkgsplit(x)
346                 if libcver:
347                         libcver+=","+"-".join(xs[1:])
348                 else:
349                         libcver="-".join(xs[1:])
350         if libcver==[]:
351                 libcver="unavailable"
352
353         gccver = getgccversion(chost)
354         unameout=platform.release()+" "+platform.machine()
355
356         return "Portage " + portage.VERSION +" ("+profilever+", "+gccver+", "+libcver+", "+unameout+")"
357
358 def create_depgraph_params(myopts, myaction):
359         #configure emerge engine parameters
360         #
361         # self:      include _this_ package regardless of whether it is merged.
362         # selective: exclude the package if it is merged
363         # recurse:   go into the dependencies
364         # deep:      go into the dependencies of already merged packages
365         # empty:     pretend nothing is merged
366         # complete:  completely account for all known dependencies
367         # remove:    build graph for use in removing packages
368         myparams = set(["recurse"])
369
370         if myaction == "remove":
371                 myparams.add("remove")
372                 myparams.add("complete")
373                 return myparams
374
375         if "--update" in myopts or \
376                 "--newuse" in myopts or \
377                 "--reinstall" in myopts or \
378                 "--noreplace" in myopts:
379                 myparams.add("selective")
380         if "--emptytree" in myopts:
381                 myparams.add("empty")
382                 myparams.discard("selective")
383         if "--nodeps" in myopts:
384                 myparams.discard("recurse")
385         if "--deep" in myopts:
386                 myparams.add("deep")
387         if "--complete-graph" in myopts:
388                 myparams.add("complete")
389         return myparams
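
# Illustrative results (not from the original source); myopts is the
# parsed option dictionary:
#
#     >>> sorted(create_depgraph_params({"--update": True, "--deep": True}, None))
#     ['deep', 'recurse', 'selective']
#     >>> sorted(create_depgraph_params({}, "remove"))
#     ['complete', 'recurse', 'remove']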
390
391 # search functionality
392 class search(object):
393
394         #
395         # class constants
396         #
397         VERSION_SHORT=1
398         VERSION_RELEASE=2
399
400         #
401         # public interface
402         #
403         def __init__(self, root_config, spinner, searchdesc,
404                 verbose, usepkg, usepkgonly):
405                 """Searches the available and installed packages for the supplied search key.
406                 The list of available and installed packages is created at object instantiation.
407                 This makes successive searches faster."""
408                 self.settings = root_config.settings
409                 self.vartree = root_config.trees["vartree"]
410                 self.spinner = spinner
411                 self.verbose = verbose
412                 self.searchdesc = searchdesc
413                 self.root_config = root_config
414                 self.setconfig = root_config.setconfig
415                 self.matches = {"pkg" : []}
416                 self.mlen = 0
417
418                 def fake_portdb():
419                         pass
420                 self.portdb = fake_portdb
421                 for attrib in ("aux_get", "cp_all",
422                         "xmatch", "findname", "getFetchMap"):
423                         setattr(fake_portdb, attrib, getattr(self, "_"+attrib))
424
425                 self._dbs = []
426
427                 portdb = root_config.trees["porttree"].dbapi
428                 bindb = root_config.trees["bintree"].dbapi
429                 vardb = root_config.trees["vartree"].dbapi
430
431                 if not usepkgonly and portdb._have_root_eclass_dir:
432                         self._dbs.append(portdb)
433
434                 if (usepkg or usepkgonly) and bindb.cp_all():
435                         self._dbs.append(bindb)
436
437                 self._dbs.append(vardb)
438                 self._portdb = portdb
439
440         def _cp_all(self):
441                 cp_all = set()
442                 for db in self._dbs:
443                         cp_all.update(db.cp_all())
444                 return list(sorted(cp_all))
445
446         def _aux_get(self, *args, **kwargs):
447                 for db in self._dbs:
448                         try:
449                                 return db.aux_get(*args, **kwargs)
450                         except KeyError:
451                                 pass
452                 raise
453
454         def _findname(self, *args, **kwargs):
455                 for db in self._dbs:
456                         if db is not self._portdb:
457                                 # We don't want findname to return anything
458                                 # unless it's an ebuild in a portage tree.
459                                 # Otherwise, it's already built and we don't
460                                 # care about it.
461                                 continue
462                         func = getattr(db, "findname", None)
463                         if func:
464                                 value = func(*args, **kwargs)
465                                 if value:
466                                         return value
467                 return None
468
469         def _getFetchMap(self, *args, **kwargs):
470                 for db in self._dbs:
471                         func = getattr(db, "getFetchMap", None)
472                         if func:
473                                 value = func(*args, **kwargs)
474                                 if value:
475                                         return value
476                 return {}
477
478         def _visible(self, db, cpv, metadata):
479                 installed = db is self.vartree.dbapi
480                 built = installed or db is not self._portdb
481                 pkg_type = "ebuild"
482                 if installed:
483                         pkg_type = "installed"
484                 elif built:
485                         pkg_type = "binary"
486                 return visible(self.settings,
487                         Package(type_name=pkg_type, root_config=self.root_config,
488                         cpv=cpv, built=built, installed=installed, metadata=metadata))
489
490         def _xmatch(self, level, atom):
491                 """
492                 This method does not expand old-style virtuals because it
493                 is restricted to returning matches for a single ${CATEGORY}/${PN}
494                 and old-style virtual matches are unreliable for that when querying
495                 multiple package databases. If necessary, old-style virtual expansion
496                 can be performed on atoms prior to calling this method.
497                 """
498                 cp = portage.dep_getkey(atom)
499                 if level == "match-all":
500                         matches = set()
501                         for db in self._dbs:
502                                 if hasattr(db, "xmatch"):
503                                         matches.update(db.xmatch(level, atom))
504                                 else:
505                                         matches.update(db.match(atom))
506                         result = list(x for x in matches if portage.cpv_getkey(x) == cp)
507                         db._cpv_sort_ascending(result)
508                 elif level == "match-visible":
509                         matches = set()
510                         for db in self._dbs:
511                                 if hasattr(db, "xmatch"):
512                                         matches.update(db.xmatch(level, atom))
513                                 else:
514                                         db_keys = list(db._aux_cache_keys)
515                                         for cpv in db.match(atom):
516                                                 metadata = izip(db_keys,
517                                                         db.aux_get(cpv, db_keys))
518                                                 if not self._visible(db, cpv, metadata):
519                                                         continue
520                                                 matches.add(cpv)
521                         result = list(x for x in matches if portage.cpv_getkey(x) == cp)
522                         db._cpv_sort_ascending(result)
523                 elif level == "bestmatch-visible":
524                         result = None
525                         for db in self._dbs:
526                                 if hasattr(db, "xmatch"):
527                                         cpv = db.xmatch("bestmatch-visible", atom)
528                                         if not cpv or portage.cpv_getkey(cpv) != cp:
529                                                 continue
530                                         if not result or cpv == portage.best([cpv, result]):
531                                                 result = cpv
532                                 else:
533                                         db_keys = Package.metadata_keys
534                                         # break out of this loop with highest visible
535                                         # match, checked in descending order
536                                         for cpv in reversed(db.match(atom)):
537                                                 if portage.cpv_getkey(cpv) != cp:
538                                                         continue
539                                                 metadata = izip(db_keys,
540                                                         db.aux_get(cpv, db_keys))
541                                                 if not self._visible(db, cpv, metadata):
542                                                         continue
543                                                 if not result or cpv == portage.best([cpv, result]):
544                                                         result = cpv
545                                                 break
546                 else:
547                         raise NotImplementedError(level)
548                 return result
549
550         def execute(self,searchkey):
551                 """Performs the search for the supplied search key"""
552                 match_category = 0
553                 self.searchkey=searchkey
554                 self.packagematches = []
555                 if self.searchdesc:
556                         self.searchdesc=1
557                         self.matches = {"pkg":[], "desc":[], "set":[]}
558                 else:
559                         self.searchdesc=0
560                         self.matches = {"pkg":[], "set":[]}
561                 print "Searching...   ",
562
563                 regexsearch = False
564                 if self.searchkey.startswith('%'):
565                         regexsearch = True
566                         self.searchkey = self.searchkey[1:]
567                 if self.searchkey.startswith('@'):
568                         match_category = 1
569                         self.searchkey = self.searchkey[1:]
570                 if regexsearch:
571                         self.searchre=re.compile(self.searchkey,re.I)
572                 else:
573                         self.searchre=re.compile(re.escape(self.searchkey), re.I)
574                 for package in self.portdb.cp_all():
575                         self.spinner.update()
576
577                         if match_category:
578                                 match_string  = package[:]
579                         else:
580                                 match_string  = package.split("/")[-1]
581
582                         masked=0
583                         if self.searchre.search(match_string):
584                                 if not self.portdb.xmatch("match-visible", package):
585                                         masked=1
586                                 self.matches["pkg"].append([package,masked])
587                         elif self.searchdesc: # DESCRIPTION searching
588                                 full_package = self.portdb.xmatch("bestmatch-visible", package)
589                                 if not full_package:
590                                         #no match found; we don't want to query description
591                                         full_package = portage.best(
592                                                 self.portdb.xmatch("match-all", package))
593                                         if not full_package:
594                                                 continue
595                                         else:
596                                                 masked=1
597                                 try:
598                                         full_desc = self.portdb.aux_get(
599                                                 full_package, ["DESCRIPTION"])[0]
600                                 except KeyError:
601                                         print "emerge: search: aux_get() failed, skipping"
602                                         continue
603                                 if self.searchre.search(full_desc):
604                                         self.matches["desc"].append([full_package,masked])
605
606                 self.sdict = self.setconfig.getSets()
607                 for setname in self.sdict:
608                         self.spinner.update()
609                         if match_category:
610                                 match_string = setname
611                         else:
612                                 match_string = setname.split("/")[-1]
613                         
614                         if self.searchre.search(match_string):
615                                 self.matches["set"].append([setname, False])
616                         elif self.searchdesc:
617                                 if self.searchre.search(
618                                         self.sdict[setname].getMetadata("DESCRIPTION")):
619                                         self.matches["set"].append([setname, False])
620                         
621                 self.mlen=0
622                 for mtype in self.matches:
623                         self.matches[mtype].sort()
624                         self.mlen += len(self.matches[mtype])
625
626         def addCP(self, cp):
627                 if not self.portdb.xmatch("match-all", cp):
628                         return
629                 masked = 0
630                 if not self.portdb.xmatch("bestmatch-visible", cp):
631                         masked = 1
632                 self.matches["pkg"].append([cp, masked])
633                 self.mlen += 1
634
635         def output(self):
636                 """Outputs the results of the search."""
637                 print "\b\b  \n[ Results for search key : "+white(self.searchkey)+" ]"
638                 print "[ Applications found : "+white(str(self.mlen))+" ]"
639                 print " "
640                 vardb = self.vartree.dbapi
641                 for mtype in self.matches:
642                         for match,masked in self.matches[mtype]:
643                                 full_package = None
644                                 if mtype == "pkg":
645                                         catpack = match
646                                         full_package = self.portdb.xmatch(
647                                                 "bestmatch-visible", match)
648                                         if not full_package:
649                                                 #no match found; we don't want to query description
650                                                 masked=1
651                                                 full_package = portage.best(
652                                                         self.portdb.xmatch("match-all",match))
653                                 elif mtype == "desc":
654                                         full_package = match
655                                         match        = portage.cpv_getkey(match)
656                                 elif mtype == "set":
657                                         print green("*")+"  "+white(match)
658                                         print "     ", darkgreen("Description:")+"  ", self.sdict[match].getMetadata("DESCRIPTION")
659                                         print
660                                 if full_package:
661                                         try:
662                                                 desc, homepage, license = self.portdb.aux_get(
663                                                         full_package, ["DESCRIPTION","HOMEPAGE","LICENSE"])
664                                         except KeyError:
665                                                 print "emerge: search: aux_get() failed, skipping"
666                                                 continue
667                                         if masked:
668                                                 print green("*")+"  "+white(match)+" "+red("[ Masked ]")
669                                         else:
670                                                 print green("*")+"  "+white(match)
671                                         myversion = self.getVersion(full_package, search.VERSION_RELEASE)
672
673                                         mysum = [0,0]
674                                         file_size_str = None
675                                         mycat = match.split("/")[0]
676                                         mypkg = match.split("/")[1]
677                                         mycpv = match + "-" + myversion
678                                         myebuild = self.portdb.findname(mycpv)
679                                         if myebuild:
680                                                 pkgdir = os.path.dirname(myebuild)
681                                                 from portage import manifest
682                                                 mf = manifest.Manifest(
683                                                         pkgdir, self.settings["DISTDIR"])
684                                                 try:
685                                                         uri_map = self.portdb.getFetchMap(mycpv)
686                                                 except portage.exception.InvalidDependString, e:
687                                                         file_size_str = "Unknown (%s)" % (e,)
688                                                         del e
689                                                 else:
690                                                         try:
691                                                                 mysum[0] = mf.getDistfilesSize(uri_map)
692                                                         except KeyError, e:
693                                                                 file_size_str = "Unknown (missing " + \
694                                                                         "digest for %s)" % (e,)
695                                                                 del e
696
697                                         available = False
698                                         for db in self._dbs:
699                                                 if db is not vardb and \
700                                                         db.cpv_exists(mycpv):
701                                                         available = True
702                                                         if not myebuild and hasattr(db, "bintree"):
703                                                                 myebuild = db.bintree.getname(mycpv)
704                                                                 try:
705                                                                         mysum[0] = os.stat(myebuild).st_size
706                                                                 except OSError:
707                                                                         myebuild = None
708                                                         break
709
710                                         if myebuild and file_size_str is None:
711                                                 mystr = str(mysum[0] / 1024)
712                                                 mycount = len(mystr)
713                                                 while (mycount > 3):
714                                                         mycount -= 3
715                                                         mystr = mystr[:mycount] + "," + mystr[mycount:]
716                                                 file_size_str = mystr + " kB"
717
718                                         if self.verbose:
719                                                 if available:
720                                                         print "     ", darkgreen("Latest version available:"),myversion
721                                                 print "     ", self.getInstallationStatus(mycat+'/'+mypkg)
722                                                 if myebuild:
723                                                         print "      %s %s" % \
724                                                                 (darkgreen("Size of files:"), file_size_str)
725                                                 print "     ", darkgreen("Homepage:")+"     ",homepage
726                                                 print "     ", darkgreen("Description:")+"  ",desc
727                                                 print "     ", darkgreen("License:")+"      ",license
728                                                 print
729         #
730         # private interface
731         #
732         def getInstallationStatus(self,package):
733                 installed_package = self.vartree.dep_bestmatch(package)
734                 result = ""
735                 version = self.getVersion(installed_package,search.VERSION_RELEASE)
736                 if len(version) > 0:
737                         result = darkgreen("Latest version installed:")+" "+version
738                 else:
739                         result = darkgreen("Latest version installed:")+" [ Not Installed ]"
740                 return result
741
742         def getVersion(self,full_package,detail):
743                 if len(full_package) > 1:
744                         package_parts = portage.catpkgsplit(full_package)
745                         if detail == search.VERSION_RELEASE and package_parts[3] != 'r0':
746                                 result = package_parts[2]+ "-" + package_parts[3]
747                         else:
748                                 result = package_parts[2]
749                 else:
750                         result = ""
751                 return result
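
# Illustrative driver for the search class (hypothetical root_config and
# spinner objects, not from the original source).  A leading '%' in the
# search key enables regex matching and a leading '@' matches against the
# full category/package name:
#
#     >>> s = search(root_config, spinner, searchdesc=True,
#     ...     verbose=False, usepkg=False, usepkgonly=False)
#     >>> s.execute("%^python$")
#     >>> s.output()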
752
753 class RootConfig(object):
754         """This is used internally by depgraph to track information about a
755         particular $ROOT."""
756
757         pkg_tree_map = {
758                 "ebuild"    : "porttree",
759                 "binary"    : "bintree",
760                 "installed" : "vartree"
761         }
762
763         tree_pkg_map = {}
764         for k, v in pkg_tree_map.iteritems():
765                 tree_pkg_map[v] = k
766
767         def __init__(self, settings, trees, setconfig):
768                 self.trees = trees
769                 self.settings = settings
770                 self.iuse_implicit = tuple(sorted(settings._get_implicit_iuse()))
771                 self.root = self.settings["ROOT"]
772                 self.setconfig = setconfig
773                 self.sets = self.setconfig.getSets()
774                 self.visible_pkgs = PackageVirtualDbapi(self.settings)
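
# The two class-level maps above are simple inverses of each other
# (illustrative):
#
#     >>> RootConfig.pkg_tree_map["binary"]
#     'bintree'
#     >>> RootConfig.tree_pkg_map["bintree"]
#     'binary'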
775
776 def create_world_atom(pkg, args_set, root_config):
777         """Create a new atom for the world file if one does not exist.  If the
778         argument atom is precise enough to identify a specific slot then a slot
779         atom will be returned. Atoms that are in the system set may also be stored
780         in world since system atoms can only match one slot while world atoms can
781         be greedy with respect to slots.  Unslotted system packages will not be
782         stored in world."""
783
784         arg_atom = args_set.findAtomForPackage(pkg)
785         if not arg_atom:
786                 return None
787         cp = portage.dep_getkey(arg_atom)
788         new_world_atom = cp
789         sets = root_config.sets
790         portdb = root_config.trees["porttree"].dbapi
791         vardb = root_config.trees["vartree"].dbapi
792         available_slots = set(portdb.aux_get(cpv, ["SLOT"])[0] \
793                 for cpv in portdb.match(cp))
794         slotted = len(available_slots) > 1 or \
795                 (len(available_slots) == 1 and "0" not in available_slots)
796         if not slotted:
797                 # check the vdb in case this is multislot
798                 available_slots = set(vardb.aux_get(cpv, ["SLOT"])[0] \
799                         for cpv in vardb.match(cp))
800                 slotted = len(available_slots) > 1 or \
801                         (len(available_slots) == 1 and "0" not in available_slots)
802         if slotted and arg_atom != cp:
803                 # If the user gave a specific atom, store it as a
804                 # slot atom in the world file.
805                 slot_atom = pkg.slot_atom
806
807                 # For USE=multislot, there are a couple of cases to
808                 # handle here:
809                 #
810                 # 1) SLOT="0", but the real SLOT spontaneously changed to some
811                 #    unknown value, so just record an unslotted atom.
812                 #
813                 # 2) SLOT comes from an installed package and there is no
814                 #    matching SLOT in the portage tree.
815                 #
816                 # Make sure that the slot atom is available in either the
817                 # portdb or the vardb, since otherwise the user certainly
818                 # doesn't want the SLOT atom recorded in the world file
819                 # (case 1 above).  If it's only available in the vardb,
820                 # the user may be trying to prevent a USE=multislot
821                 # package from being removed by --depclean (case 2 above).
822
823                 mydb = portdb
824                 if not portdb.match(slot_atom):
825                         # SLOT seems to come from an installed multislot package
826                         mydb = vardb
827                 # If there is no installed package matching the SLOT atom,
828                 # it probably changed SLOT spontaneously due to USE=multislot,
829                 # so just record an unslotted atom.
830                 if vardb.match(slot_atom):
831                         # Now verify that the argument is precise
832                         # enough to identify a specific slot.
833                         matches = mydb.match(arg_atom)
834                         matched_slots = set()
835                         for cpv in matches:
836                                 matched_slots.add(mydb.aux_get(cpv, ["SLOT"])[0])
837                         if len(matched_slots) == 1:
838                                 new_world_atom = slot_atom
839
840         if new_world_atom == sets["world"].findAtomForPackage(pkg):
841                 # Both atoms would be identical, so there's nothing to add.
842                 return None
843         if not slotted:
844                 # Unlike world atoms, system atoms are not greedy for slots, so they
845                 # can't be safely excluded from world if they are slotted.
846                 system_atom = sets["system"].findAtomForPackage(pkg)
847                 if system_atom:
848                         if not portage.dep_getkey(system_atom).startswith("virtual/"):
849                                 return None
850                         # System virtuals aren't safe to exclude from world since they can
851                         # match multiple old-style virtuals but only one of them will be
852                         # pulled in by update or depclean.
853                         providers = portdb.mysettings.getvirtuals().get(
854                                 portage.dep_getkey(system_atom))
855                         if providers and len(providers) == 1 and providers[0] == cp:
856                                 return None
857         return new_world_atom
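
# Worked example (hypothetical atoms, not from the original source): if the
# user ran `emerge ">=x11-libs/gtk+-2"` and the matched package lives in
# SLOT "2", create_world_atom() returns the slot atom "x11-libs/gtk+:2" for
# the world file; for an unslotted package the plain "x11-libs/gtk+" atom
# is returned instead.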
858
859 def filter_iuse_defaults(iuse):
860         for flag in iuse:
861                 if flag.startswith("+") or flag.startswith("-"):
862                         yield flag[1:]
863                 else:
864                         yield flag
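
# Illustrative behaviour (not from the original source):
#
#     >>> list(filter_iuse_defaults(["+foo", "-bar", "baz"]))
#     ['foo', 'bar', 'baz']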
865
866 class SlotObject(object):
867         __slots__ = ("__weakref__",)
868
869         def __init__(self, **kwargs):
870                 classes = [self.__class__]
871                 while classes:
872                         c = classes.pop()
873                         if c is SlotObject:
874                                 continue
875                         classes.extend(c.__bases__)
876                         slots = getattr(c, "__slots__", None)
877                         if not slots:
878                                 continue
879                         for myattr in slots:
880                                 myvalue = kwargs.get(myattr, None)
881                                 setattr(self, myattr, myvalue)
882
883         def copy(self):
884                 """
885                 Create a new instance and copy all attributes
886                 defined from __slots__ (including those from
887                 inherited classes).
888                 """
889                 obj = self.__class__()
890
891                 classes = [self.__class__]
892                 while classes:
893                         c = classes.pop()
894                         if c is SlotObject:
895                                 continue
896                         classes.extend(c.__bases__)
897                         slots = getattr(c, "__slots__", None)
898                         if not slots:
899                                 continue
900                         for myattr in slots:
901                                 setattr(obj, myattr, getattr(self, myattr))
902
903                 return obj
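
# Illustrative sketch (hypothetical subclass, not from the original
# source): subclasses only declare __slots__ and inherit keyword
# initialization plus copy():
#
#     >>> class _Example(SlotObject):
#     ...     __slots__ = ("foo", "bar")
#     >>> obj = _Example(foo=1)
#     >>> (obj.foo, obj.bar)
#     (1, None)
#     >>> obj.copy().foo
#     1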
904
905 class AbstractDepPriority(SlotObject):
906         __slots__ = ("buildtime", "runtime", "runtime_post")
907
908         def __lt__(self, other):
909                 return self.__int__() < other
910
911         def __le__(self, other):
912                 return self.__int__() <= other
913
914         def __eq__(self, other):
915                 return self.__int__() == other
916
917         def __ne__(self, other):
918                 return self.__int__() != other
919
920         def __gt__(self, other):
921                 return self.__int__() > other
922
923         def __ge__(self, other):
924                 return self.__int__() >= other
925
926         def copy(self):
927                 import copy
928                 return copy.copy(self)
929
930 class DepPriority(AbstractDepPriority):
931
932         __slots__ = ("satisfied", "optional", "rebuild")
933
934         def __int__(self):
935                 return 0
936
937         def __str__(self):
938                 if self.optional:
939                         return "optional"
940                 if self.buildtime:
941                         return "buildtime"
942                 if self.runtime:
943                         return "runtime"
944                 if self.runtime_post:
945                         return "runtime_post"
946                 return "soft"
947
948 class BlockerDepPriority(DepPriority):
949         __slots__ = ()
950         def __int__(self):
951                 return 0
952
953         def __str__(self):
954                 return 'blocker'
955
956 BlockerDepPriority.instance = BlockerDepPriority()
957
958 class UnmergeDepPriority(AbstractDepPriority):
959         __slots__ = ("optional", "satisfied",)
960         """
961         Combination of properties           Priority  Category
962
963         runtime                                0       HARD
964         runtime_post                          -1       HARD
965         buildtime                             -2       SOFT
966         (none of the above)                   -2       SOFT
967         """
968
969         MAX    =  0
970         SOFT   = -2
971         MIN    = -2
972
973         def __int__(self):
974                 if self.runtime:
975                         return 0
976                 if self.runtime_post:
977                         return -1
978                 if self.buildtime:
979                         return -2
980                 return -2
981
982         def __str__(self):
983                 myvalue = self.__int__()
984                 if myvalue > self.SOFT:
985                         return "hard"
986                 return "soft"
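
# Illustrative comparisons (not from the original source); the rich
# comparison operators inherited from AbstractDepPriority delegate to
# __int__():
#
#     >>> UnmergeDepPriority(runtime=True) > UnmergeDepPriority(buildtime=True)
#     True
#     >>> str(UnmergeDepPriority(buildtime=True))
#     'soft'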
987
988 class DepPriorityNormalRange(object):
989         """
990         DepPriority properties              Index      Category
991
992         buildtime                                      HARD
993         runtime                                3       MEDIUM
994         runtime_post                           2       MEDIUM_SOFT
995         optional                               1       SOFT
996         (none of the above)                    0       NONE
997         """
998         MEDIUM      = 3
999         MEDIUM_SOFT = 2
1000         SOFT        = 1
1001         NONE        = 0
1002
1003         @classmethod
1004         def _ignore_optional(cls, priority):
1005                 if priority.__class__ is not DepPriority:
1006                         return False
1007                 return bool(priority.optional)
1008
1009         @classmethod
1010         def _ignore_runtime_post(cls, priority):
1011                 if priority.__class__ is not DepPriority:
1012                         return False
1013                 return bool(priority.optional or priority.runtime_post)
1014
1015         @classmethod
1016         def _ignore_runtime(cls, priority):
1017                 if priority.__class__ is not DepPriority:
1018                         return False
1019                 return not priority.buildtime
1020
1021         ignore_medium      = _ignore_runtime
1022         ignore_medium_soft = _ignore_runtime_post
1023         ignore_soft        = _ignore_optional
1024
1025 DepPriorityNormalRange.ignore_priority = (
1026         None,
1027         DepPriorityNormalRange._ignore_optional,
1028         DepPriorityNormalRange._ignore_runtime_post,
1029         DepPriorityNormalRange._ignore_runtime
1030 )
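
# The tuple above runs from "ignore nothing" (None) to "ignore everything
# except buildtime edges"; callers can walk it to progressively relax soft
# dependency edges.  Illustrative check (not from the original source):
#
#     >>> dp = DepPriority(runtime_post=True)
#     >>> [f is None or f(dp) for f in DepPriorityNormalRange.ignore_priority]
#     [True, False, True, True]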
1031
1032 class DepPrioritySatisfiedRange(object):
1033         """
1034         DepPriority                         Index      Category
1035
1036         not satisfied and buildtime                    HARD
1037         not satisfied and runtime              7       MEDIUM
1038         not satisfied and runtime_post         6       MEDIUM_SOFT
1039         satisfied and buildtime and rebuild    5       SOFT
1040         satisfied and buildtime                4       SOFT
1041         satisfied and runtime                  3       SOFT
1042         satisfied and runtime_post             2       SOFT
1043         optional                               1       SOFT
1044         (none of the above)                    0       NONE
1045         """
1046         MEDIUM      = 7
1047         MEDIUM_SOFT = 6
1048         SOFT        = 5
1049         NONE        = 0
1050
1051         @classmethod
1052         def _ignore_optional(cls, priority):
1053                 if priority.__class__ is not DepPriority:
1054                         return False
1055                 return bool(priority.optional)
1056
1057         @classmethod
1058         def _ignore_satisfied_runtime_post(cls, priority):
1059                 if priority.__class__ is not DepPriority:
1060                         return False
1061                 if priority.optional:
1062                         return True
1063                 if not priority.satisfied:
1064                         return False
1065                 return bool(priority.runtime_post)
1066
1067         @classmethod
1068         def _ignore_satisfied_runtime(cls, priority):
1069                 if priority.__class__ is not DepPriority:
1070                         return False
1071                 if priority.optional:
1072                         return True
1073                 if not priority.satisfied:
1074                         return False
1075                 return not priority.buildtime
1076
1077         @classmethod
1078         def _ignore_satisfied_buildtime(cls, priority):
1079                 if priority.__class__ is not DepPriority:
1080                         return False
1081                 if priority.optional:
1082                         return True
1083                 if not priority.satisfied:
1084                         return False
1085                 if priority.buildtime:
1086                         return not priority.rebuild
1087                 return True
1088
1089         @classmethod
1090         def _ignore_satisfied_buildtime_rebuild(cls, priority):
1091                 if priority.__class__ is not DepPriority:
1092                         return False
1093                 if priority.optional:
1094                         return True
1095                 return bool(priority.satisfied)
1096
1097         @classmethod
1098         def _ignore_runtime_post(cls, priority):
1099                 if priority.__class__ is not DepPriority:
1100                         return False
1101                 return bool(priority.optional or \
1102                         priority.satisfied or \
1103                         priority.runtime_post)
1104
1105         @classmethod
1106         def _ignore_runtime(cls, priority):
1107                 if priority.__class__ is not DepPriority:
1108                         return False
1109                 return bool(priority.satisfied or \
1110                         not priority.buildtime)
1111
1112         ignore_medium      = _ignore_runtime
1113         ignore_medium_soft = _ignore_runtime_post
1114         ignore_soft        = _ignore_satisfied_buildtime_rebuild
1115
1116 DepPrioritySatisfiedRange.ignore_priority = (
1117         None,
1118         DepPrioritySatisfiedRange._ignore_optional,
1119         DepPrioritySatisfiedRange._ignore_satisfied_runtime_post,
1120         DepPrioritySatisfiedRange._ignore_satisfied_runtime,
1121         DepPrioritySatisfiedRange._ignore_satisfied_buildtime,
1122         DepPrioritySatisfiedRange._ignore_satisfied_buildtime_rebuild,
1123         DepPrioritySatisfiedRange._ignore_runtime_post,
1124         DepPrioritySatisfiedRange._ignore_runtime
1125 )
1126
1127 def _find_deep_system_runtime_deps(graph):
1128         deep_system_deps = set()
1129         node_stack = []
1130         for node in graph:
1131                 if not isinstance(node, Package) or \
1132                         node.operation == 'uninstall':
1133                         continue
1134                 if node.root_config.sets['system'].findAtomForPackage(node):
1135                         node_stack.append(node)
1136
1137         def ignore_priority(priority):
1138                 """
1139                 Ignore non-runtime priorities.
1140                 """
1141                 if isinstance(priority, DepPriority) and \
1142                         (priority.runtime or priority.runtime_post):
1143                         return False
1144                 return True
1145
1146         while node_stack:
1147                 node = node_stack.pop()
1148                 if node in deep_system_deps:
1149                         continue
1150                 deep_system_deps.add(node)
1151                 for child in graph.child_nodes(node, ignore_priority=ignore_priority):
1152                         if not isinstance(child, Package) or \
1153                                 child.operation == 'uninstall':
1154                                 continue
1155                         node_stack.append(child)
1156
1157         return deep_system_deps
1158
1159 class FakeVartree(portage.vartree):
1160         """This implements an in-memory copy of a vartree instance that provides
1161         all the interfaces required for use by the depgraph.  The vardb is locked
1162         during the constructor call just long enough to read a copy of the
1163         installed package information.  This allows the depgraph to do its
1164         dependency calculations without holding a lock on the vardb.  It also
1165         allows things like vardb global updates to be done in memory so that the
1166         user doesn't necessarily need write access to the vardb in cases where
1167         global updates are necessary (updates are performed when necessary if there
1168         is not a matching ebuild in the tree)."""
1169         def __init__(self, root_config, pkg_cache=None, acquire_lock=1):
1170                 self._root_config = root_config
1171                 if pkg_cache is None:
1172                         pkg_cache = {}
1173                 real_vartree = root_config.trees["vartree"]
1174                 portdb = root_config.trees["porttree"].dbapi
1175                 self.root = real_vartree.root
1176                 self.settings = real_vartree.settings
1177                 mykeys = list(real_vartree.dbapi._aux_cache_keys)
1178                 if "_mtime_" not in mykeys:
1179                         mykeys.append("_mtime_")
1180                 self._db_keys = mykeys
1181                 self._pkg_cache = pkg_cache
1182                 self.dbapi = PackageVirtualDbapi(real_vartree.settings)
1183                 vdb_path = os.path.join(self.root, portage.VDB_PATH)
1184                 try:
1185                         # At least the parent needs to exist for the lock file.
1186                         portage.util.ensure_dirs(vdb_path)
1187                 except portage.exception.PortageException:
1188                         pass
1189                 vdb_lock = None
1190                 try:
1191                         if acquire_lock and os.access(vdb_path, os.W_OK):
1192                                 vdb_lock = portage.locks.lockdir(vdb_path)
1193                         real_dbapi = real_vartree.dbapi
1194                         slot_counters = {}
1195                         for cpv in real_dbapi.cpv_all():
1196                                 cache_key = ("installed", self.root, cpv, "nomerge")
1197                                 pkg = self._pkg_cache.get(cache_key)
1198                                 if pkg is not None:
1199                                         metadata = pkg.metadata
1200                                 else:
1201                                         metadata = dict(izip(mykeys, real_dbapi.aux_get(cpv, mykeys)))
1202                                 myslot = metadata["SLOT"]
1203                                 mycp = portage.dep_getkey(cpv)
1204                                 myslot_atom = "%s:%s" % (mycp, myslot)
1205                                 try:
1206                                         mycounter = long(metadata["COUNTER"])
1207                                 except ValueError:
1208                                         mycounter = 0
1209                                         metadata["COUNTER"] = str(mycounter)
1210                                 other_counter = slot_counters.get(myslot_atom, None)
1211                                 if other_counter is not None:
1212                                         if other_counter > mycounter:
1213                                                 continue
1214                                 slot_counters[myslot_atom] = mycounter
1215                                 if pkg is None:
1216                                         pkg = Package(built=True, cpv=cpv,
1217                                                 installed=True, metadata=metadata,
1218                                                 root_config=root_config, type_name="installed")
1219                                 self._pkg_cache[pkg] = pkg
1220                                 self.dbapi.cpv_inject(pkg)
1221                         real_dbapi.flush_cache()
1222                 finally:
1223                         if vdb_lock:
1224                                 portage.locks.unlockdir(vdb_lock)
1225                 # Populate the old-style virtuals using the cached values.
1226                 if not self.settings.treeVirtuals:
1227                         self.settings.treeVirtuals = portage.util.map_dictlist_vals(
1228                                 portage.getCPFromCPV, self.get_all_provides())
1229
1230                 # Initialize variables needed for lazy cache pulls of the live ebuild
1231                 # metadata.  This ensures that the vardb lock is released ASAP, without
1232                 # being delayed in case cache generation is triggered.
1233                 self._aux_get = self.dbapi.aux_get
1234                 self.dbapi.aux_get = self._aux_get_wrapper
1235                 self._match = self.dbapi.match
1236                 self.dbapi.match = self._match_wrapper
1237                 self._aux_get_history = set()
1238                 self._portdb_keys = ["EAPI", "DEPEND", "RDEPEND", "PDEPEND"]
1239                 self._portdb = portdb
1240                 self._global_updates = None
1241
1242         def _match_wrapper(self, cpv, use_cache=1):
1243                 """
1244                 Make sure the metadata in Package instances gets updated for any
1245                 cpv that is returned from a match() call, since the metadata can
1246                 be accessed directly from the Package instance instead of via
1247                 aux_get().
1248                 """
1249                 matches = self._match(cpv, use_cache=use_cache)
1250                 for cpv in matches:
1251                         if cpv in self._aux_get_history:
1252                                 continue
1253                         self._aux_get_wrapper(cpv, [])
1254                 return matches
1255
1256         def _aux_get_wrapper(self, pkg, wants):
1257                 if pkg in self._aux_get_history:
1258                         return self._aux_get(pkg, wants)
1259                 self._aux_get_history.add(pkg)
1260                 try:
1261                         # Use the live ebuild metadata if possible.
1262                         live_metadata = dict(izip(self._portdb_keys,
1263                                 self._portdb.aux_get(pkg, self._portdb_keys)))
1264                         if not portage.eapi_is_supported(live_metadata["EAPI"]):
1265                                 raise KeyError(pkg)
1266                         self.dbapi.aux_update(pkg, live_metadata)
1267                 except (KeyError, portage.exception.PortageException):
1268                         if self._global_updates is None:
1269                                 self._global_updates = \
1270                                         grab_global_updates(self._portdb.porttree_root)
1271                         perform_global_updates(
1272                                 pkg, self.dbapi, self._global_updates)
1273                 return self._aux_get(pkg, wants)
1274
1275         def sync(self, acquire_lock=1):
1276                 """
1277                 Call this method to synchronize state with the real vardb
1278                 after one or more packages may have been installed or
1279                 uninstalled.
1280                 """
1281                 vdb_path = os.path.join(self.root, portage.VDB_PATH)
1282                 try:
1283                         # At least the parent needs to exist for the lock file.
1284                         portage.util.ensure_dirs(vdb_path)
1285                 except portage.exception.PortageException:
1286                         pass
1287                 vdb_lock = None
1288                 try:
1289                         if acquire_lock and os.access(vdb_path, os.W_OK):
1290                                 vdb_lock = portage.locks.lockdir(vdb_path)
1291                         self._sync()
1292                 finally:
1293                         if vdb_lock:
1294                                 portage.locks.unlockdir(vdb_lock)
1295
1296         def _sync(self):
1297
1298                 real_vardb = self._root_config.trees["vartree"].dbapi
1299                 current_cpv_set = frozenset(real_vardb.cpv_all())
1300                 pkg_vardb = self.dbapi
1301                 aux_get_history = self._aux_get_history
1302
1303                 # Remove any packages that have been uninstalled.
1304                 for pkg in list(pkg_vardb):
1305                         if pkg.cpv not in current_cpv_set:
1306                                 pkg_vardb.cpv_remove(pkg)
1307                                 aux_get_history.discard(pkg.cpv)
1308
1309                 # Validate counters and timestamps.
1310                 slot_counters = {}
1311                 root = self.root
1312                 validation_keys = ["COUNTER", "_mtime_"]
1313                 for cpv in current_cpv_set:
1314
1315                         pkg_hash_key = ("installed", root, cpv, "nomerge")
1316                         pkg = pkg_vardb.get(pkg_hash_key)
1317                         if pkg is not None:
1318                                 counter, mtime = real_vardb.aux_get(cpv, validation_keys)
1319                                 try:
1320                                         counter = long(counter)
1321                                 except ValueError:
1322                                         counter = 0
1323
1324                                 if counter != pkg.counter or \
1325                                         mtime != pkg.mtime:
1326                                         pkg_vardb.cpv_remove(pkg)
1327                                         aux_get_history.discard(pkg.cpv)
1328                                         pkg = None
1329
1330                         if pkg is None:
1331                                 pkg = self._pkg(cpv)
1332
1333                         other_counter = slot_counters.get(pkg.slot_atom)
1334                         if other_counter is not None:
1335                                 if other_counter > pkg.counter:
1336                                         continue
1337
1338                         slot_counters[pkg.slot_atom] = pkg.counter
1339                         pkg_vardb.cpv_inject(pkg)
1340
1341                 real_vardb.flush_cache()
1342
1343         def _pkg(self, cpv):
1344                 root_config = self._root_config
1345                 real_vardb = root_config.trees["vartree"].dbapi
1346                 pkg = Package(cpv=cpv, installed=True,
1347                         metadata=izip(self._db_keys,
1348                         real_vardb.aux_get(cpv, self._db_keys)),
1349                         root_config=root_config,
1350                         type_name="installed")
1351
1352                 try:
1353                         mycounter = long(pkg.metadata["COUNTER"])
1354                 except ValueError:
1355                         mycounter = 0
1356                         pkg.metadata["COUNTER"] = str(mycounter)
1357
1358                 return pkg
1359
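# Illustrative sketch, not part of the original module: one way a caller such
# as the depgraph might use FakeVartree.  The root_config parameter is assumed
# to be an already-initialized root configuration object; only interfaces
# defined above (the wrapped dbapi and sync()) are exercised here.
def _fakevartree_usage_sketch(root_config):
        # Snapshot installed-package state; the vardb lock is only held inside
        # the constructor.
        fake_vartree = FakeVartree(root_config)
        # Queries go through the in-memory dbapi; match() also refreshes the
        # matched packages from live ebuild metadata via _match_wrapper().
        installed = fake_vartree.dbapi.match("sys-apps/portage")
        # After packages are merged or unmerged, resynchronize with the
        # real vardb.
        fake_vartree.sync()
        return installed
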
1360 def grab_global_updates(portdir):
1361         from portage.update import grab_updates, parse_updates
1362         updpath = os.path.join(portdir, "profiles", "updates")
1363         try:
1364                 rawupdates = grab_updates(updpath)
1365         except portage.exception.DirectoryNotFound:
1366                 rawupdates = []
1367         upd_commands = []
1368         for mykey, mystat, mycontent in rawupdates:
1369                 commands, errors = parse_updates(mycontent)
1370                 upd_commands.extend(commands)
1371         return upd_commands
1372
1373 def perform_global_updates(mycpv, mydb, mycommands):
1374         from portage.update import update_dbentries
1375         aux_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
1376         aux_dict = dict(izip(aux_keys, mydb.aux_get(mycpv, aux_keys)))
1377         updates = update_dbentries(mycommands, aux_dict)
1378         if updates:
1379                 mydb.aux_update(mycpv, updates)
1380
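# Illustrative sketch, not part of the original module: grab_global_updates()
# and perform_global_updates() are meant to be used together, as in
# FakeVartree._aux_get_wrapper() above, to apply profiles/updates entries
# (e.g. package moves) to one package's cached dependency metadata.
def _global_updates_sketch(portdir, cpv, dbapi):
        upd_commands = grab_global_updates(portdir)
        perform_global_updates(cpv, dbapi, upd_commands)
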
1381 def visible(pkgsettings, pkg):
1382         """
1383         Check if a package is visible. This can raise an InvalidDependString
1384         exception if LICENSE is invalid.
1385         TODO: optionally generate a list of masking reasons
1386         @rtype: Boolean
1387         @returns: True if the package is visible, False otherwise.
1388         """
1389         if not pkg.metadata["SLOT"]:
1390                 return False
1391         if not pkg.installed:
1392                 if not pkgsettings._accept_chost(pkg.cpv, pkg.metadata):
1393                         return False
1394         eapi = pkg.metadata["EAPI"]
1395         if not portage.eapi_is_supported(eapi):
1396                 return False
1397         if not pkg.installed:
1398                 if portage._eapi_is_deprecated(eapi):
1399                         return False
1400                 if pkgsettings._getMissingKeywords(pkg.cpv, pkg.metadata):
1401                         return False
1402         if pkgsettings._getMaskAtom(pkg.cpv, pkg.metadata):
1403                 return False
1404         if pkgsettings._getProfileMaskAtom(pkg.cpv, pkg.metadata):
1405                 return False
1406         try:
1407                 if pkgsettings._getMissingLicenses(pkg.cpv, pkg.metadata):
1408                         return False
1409         except portage.exception.InvalidDependString:
1410                 return False
1411         return True
1412
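# Illustrative sketch, not part of the original module: visible() is called
# per package, so filtering a list of candidates (pkgsettings being a config
# instance and pkgs an iterable of Package objects) might look like this.
def _filter_visible_sketch(pkgsettings, pkgs):
        return [pkg for pkg in pkgs if visible(pkgsettings, pkg)]
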
1413 def get_masking_status(pkg, pkgsettings, root_config):
1414
1415         mreasons = portage.getmaskingstatus(
1416                 pkg, settings=pkgsettings,
1417                 portdb=root_config.trees["porttree"].dbapi)
1418
1419         if not pkg.installed:
1420                 if not pkgsettings._accept_chost(pkg.cpv, pkg.metadata):
1421                         mreasons.append("CHOST: %s" % \
1422                                 pkg.metadata["CHOST"])
1423
1424         if not pkg.metadata["SLOT"]:
1425                 mreasons.append("invalid: SLOT is undefined")
1426
1427         return mreasons
1428
1429 def get_mask_info(root_config, cpv, pkgsettings,
1430         db, pkg_type, built, installed, db_keys):
1431         eapi_masked = False
1432         try:
1433                 metadata = dict(izip(db_keys,
1434                         db.aux_get(cpv, db_keys)))
1435         except KeyError:
1436                 metadata = None
1437         if metadata and not built:
1438                 pkgsettings.setcpv(cpv, mydb=metadata)
1439                 metadata["USE"] = pkgsettings["PORTAGE_USE"]
1440                 metadata['CHOST'] = pkgsettings.get('CHOST', '')
1441         if metadata is None:
1442                 mreasons = ["corruption"]
1443         else:
1444                 eapi = metadata['EAPI']
1445                 if eapi[:1] == '-':
1446                         eapi = eapi[1:]
1447                 if not portage.eapi_is_supported(eapi):
1448                         mreasons = ['EAPI %s' % eapi]
1449                 else:
1450                         pkg = Package(type_name=pkg_type, root_config=root_config,
1451                                 cpv=cpv, built=built, installed=installed, metadata=metadata)
1452                         mreasons = get_masking_status(pkg, pkgsettings, root_config)
1453         return metadata, mreasons
1454
1455 def show_masked_packages(masked_packages):
1456         shown_licenses = set()
1457         shown_comments = set()
1458         # A cpv may have both an ebuild and a binary package
1459         # available; show only one to avoid redundant output.
1460         shown_cpvs = set()
1461         have_eapi_mask = False
1462         for (root_config, pkgsettings, cpv,
1463                 metadata, mreasons) in masked_packages:
1464                 if cpv in shown_cpvs:
1465                         continue
1466                 shown_cpvs.add(cpv)
1467                 comment, filename = None, None
1468                 if "package.mask" in mreasons:
1469                         comment, filename = \
1470                                 portage.getmaskingreason(
1471                                 cpv, metadata=metadata,
1472                                 settings=pkgsettings,
1473                                 portdb=root_config.trees["porttree"].dbapi,
1474                                 return_location=True)
1475                 missing_licenses = []
1476                 if metadata:
1477                         if not portage.eapi_is_supported(metadata["EAPI"]):
1478                                 have_eapi_mask = True
1479                         try:
1480                                 missing_licenses = \
1481                                         pkgsettings._getMissingLicenses(
1482                                                 cpv, metadata)
1483                         except portage.exception.InvalidDependString:
1484                                 # This will have already been reported
1485                                 # above via mreasons.
1486                                 pass
1487
1488                 print "- "+cpv+" (masked by: "+", ".join(mreasons)+")"
1489                 if comment and comment not in shown_comments:
1490                         print filename+":"
1491                         print comment
1492                         shown_comments.add(comment)
1493                 portdb = root_config.trees["porttree"].dbapi
1494                 for l in missing_licenses:
1495                         l_path = portdb.findLicensePath(l)
1496                         if l in shown_licenses:
1497                                 continue
1498                         msg = ("A copy of the '%s' license" + \
1499                         " is located at '%s'.") % (l, l_path)
1500                         print msg
1501                         print
1502                         shown_licenses.add(l)
1503         return have_eapi_mask
1504
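# Illustrative sketch, not part of the original module: get_mask_info() and
# show_masked_packages() are combined roughly like this when reporting why
# candidates were skipped.  db_keys would normally be Package.metadata_keys;
# the other parameters are hypothetical placeholders.
def _report_masked_sketch(root_config, pkgsettings, db, cpvs, db_keys):
        masked_packages = []
        for cpv in cpvs:
                metadata, mreasons = get_mask_info(root_config, cpv,
                        pkgsettings, db, "ebuild", False, False, db_keys)
                masked_packages.append(
                        (root_config, pkgsettings, cpv, metadata, mreasons))
        return show_masked_packages(masked_packages)
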
1505 class Task(SlotObject):
1506         __slots__ = ("_hash_key", "_hash_value")
1507
1508         def _get_hash_key(self):
1509                 hash_key = getattr(self, "_hash_key", None)
1510                 if hash_key is None:
1511                         raise NotImplementedError(self)
1512                 return hash_key
1513
1514         def __eq__(self, other):
1515                 return self._get_hash_key() == other
1516
1517         def __ne__(self, other):
1518                 return self._get_hash_key() != other
1519
1520         def __hash__(self):
1521                 hash_value = getattr(self, "_hash_value", None)
1522                 if hash_value is None:
1523                         self._hash_value = hash(self._get_hash_key())
1524                 return self._hash_value
1525
1526         def __len__(self):
1527                 return len(self._get_hash_key())
1528
1529         def __getitem__(self, key):
1530                 return self._get_hash_key()[key]
1531
1532         def __iter__(self):
1533                 return iter(self._get_hash_key())
1534
1535         def __contains__(self, key):
1536                 return key in self._get_hash_key()
1537
1538         def __str__(self):
1539                 return str(self._get_hash_key())
1540
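# Note (not part of the original module): since Task delegates __eq__, __len__,
# __getitem__, __iter__ and __contains__ to its hash key, an instance behaves
# like the underlying tuple.  For a Package pkg (defined below), for example,
# the expression
#
#       pkg == (pkg.type_name, pkg.root, pkg.cpv, pkg.operation)
#
# evaluates to True.
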
1541 class Blocker(Task):
1542
1543         __hash__ = Task.__hash__
1544         __slots__ = ("root", "atom", "cp", "eapi", "satisfied")
1545
1546         def __init__(self, **kwargs):
1547                 Task.__init__(self, **kwargs)
1548                 self.cp = portage.dep_getkey(self.atom)
1549
1550         def _get_hash_key(self):
1551                 hash_key = getattr(self, "_hash_key", None)
1552                 if hash_key is None:
1553                         self._hash_key = \
1554                                 ("blocks", self.root, self.atom, self.eapi)
1555                 return self._hash_key
1556
1557 class Package(Task):
1558
1559         __hash__ = Task.__hash__
1560         __slots__ = ("built", "cpv", "depth",
1561                 "installed", "metadata", "onlydeps", "operation",
1562                 "root_config", "type_name",
1563                 "category", "counter", "cp", "cpv_split",
1564                 "inherited", "iuse", "mtime",
1565                 "pf", "pv_split", "root", "slot", "slot_atom", "use")
1566
1567         metadata_keys = [
1568                 "CHOST", "COUNTER", "DEPEND", "EAPI",
1569                 "INHERITED", "IUSE", "KEYWORDS",
1570                 "LICENSE", "PDEPEND", "PROVIDE", "RDEPEND",
1571                 "repository", "PROPERTIES", "RESTRICT", "SLOT", "USE", "_mtime_"]
1572
1573         def __init__(self, **kwargs):
1574                 Task.__init__(self, **kwargs)
1575                 self.root = self.root_config.root
1576                 self.metadata = _PackageMetadataWrapper(self, self.metadata)
1577                 self.cp = portage.cpv_getkey(self.cpv)
1578                 slot = self.slot
1579                 if not slot:
1580                         # Avoid an InvalidAtom exception when creating slot_atom.
1581                         # This package instance will be masked due to empty SLOT.
1582                         slot = '0'
1583                 self.slot_atom = portage.dep.Atom("%s:%s" % (self.cp, slot))
1584                 self.category, self.pf = portage.catsplit(self.cpv)
1585                 self.cpv_split = portage.catpkgsplit(self.cpv)
1586                 self.pv_split = self.cpv_split[1:]
1587
1588         class _use(object):
1589
1590                 __slots__ = ("__weakref__", "enabled")
1591
1592                 def __init__(self, use):
1593                         self.enabled = frozenset(use)
1594
1595         class _iuse(object):
1596
1597                 __slots__ = ("__weakref__", "all", "enabled", "disabled", "iuse_implicit", "regex", "tokens")
1598
1599                 def __init__(self, tokens, iuse_implicit):
1600                         self.tokens = tuple(tokens)
1601                         self.iuse_implicit = iuse_implicit
1602                         enabled = []
1603                         disabled = []
1604                         other = []
1605                         for x in tokens:
1606                                 prefix = x[:1]
1607                                 if prefix == "+":
1608                                         enabled.append(x[1:])
1609                                 elif prefix == "-":
1610                                         disabled.append(x[1:])
1611                                 else:
1612                                         other.append(x)
1613                         self.enabled = frozenset(enabled)
1614                         self.disabled = frozenset(disabled)
1615                         self.all = frozenset(chain(enabled, disabled, other))
1616
1617                 def __getattribute__(self, name):
1618                         if name == "regex":
1619                                 try:
1620                                         return object.__getattribute__(self, "regex")
1621                                 except AttributeError:
1622                                         all = object.__getattribute__(self, "all")
1623                                         iuse_implicit = object.__getattribute__(self, "iuse_implicit")
1624                                         # Escape anything except ".*" which is supposed
1625                                         # to pass through from _get_implicit_iuse()
1626                                         regex = (re.escape(x) for x in chain(all, iuse_implicit))
1627                                         regex = "^(%s)$" % "|".join(regex)
1628                                         regex = regex.replace("\\.\\*", ".*")
1629                                         self.regex = re.compile(regex)
1630                         return object.__getattribute__(self, name)
1631
1632         def _get_hash_key(self):
1633                 hash_key = getattr(self, "_hash_key", None)
1634                 if hash_key is None:
1635                         if self.operation is None:
1636                                 self.operation = "merge"
1637                                 if self.onlydeps or self.installed:
1638                                         self.operation = "nomerge"
1639                         self._hash_key = \
1640                                 (self.type_name, self.root, self.cpv, self.operation)
1641                 return self._hash_key
1642
1643         def __lt__(self, other):
1644                 if other.cp != self.cp:
1645                         return False
1646                 if portage.pkgcmp(self.pv_split, other.pv_split) < 0:
1647                         return True
1648                 return False
1649
1650         def __le__(self, other):
1651                 if other.cp != self.cp:
1652                         return False
1653                 if portage.pkgcmp(self.pv_split, other.pv_split) <= 0:
1654                         return True
1655                 return False
1656
1657         def __gt__(self, other):
1658                 if other.cp != self.cp:
1659                         return False
1660                 if portage.pkgcmp(self.pv_split, other.pv_split) > 0:
1661                         return True
1662                 return False
1663
1664         def __ge__(self, other):
1665                 if other.cp != self.cp:
1666                         return False
1667                 if portage.pkgcmp(self.pv_split, other.pv_split) >= 0:
1668                         return True
1669                 return False
1670
1671 _all_metadata_keys = set(x for x in portage.auxdbkeys \
1672         if not x.startswith("UNUSED_"))
1673 _all_metadata_keys.discard("CDEPEND")
1674 _all_metadata_keys.update(Package.metadata_keys)
1675
1676 from portage.cache.mappings import slot_dict_class
1677 _PackageMetadataWrapperBase = slot_dict_class(_all_metadata_keys)
1678
1679 class _PackageMetadataWrapper(_PackageMetadataWrapperBase):
1680         """
1681         Detect metadata updates and synchronize Package attributes.
1682         """
1683
1684         __slots__ = ("_pkg",)
1685         _wrapped_keys = frozenset(
1686                 ["COUNTER", "INHERITED", "IUSE", "SLOT", "USE", "_mtime_"])
1687
1688         def __init__(self, pkg, metadata):
1689                 _PackageMetadataWrapperBase.__init__(self)
1690                 self._pkg = pkg
1691                 self.update(metadata)
1692
1693         def __setitem__(self, k, v):
1694                 _PackageMetadataWrapperBase.__setitem__(self, k, v)
1695                 if k in self._wrapped_keys:
1696                         getattr(self, "_set_" + k.lower())(k, v)
1697
1698         def _set_inherited(self, k, v):
1699                 if isinstance(v, basestring):
1700                         v = frozenset(v.split())
1701                 self._pkg.inherited = v
1702
1703         def _set_iuse(self, k, v):
1704                 self._pkg.iuse = self._pkg._iuse(
1705                         v.split(), self._pkg.root_config.iuse_implicit)
1706
1707         def _set_slot(self, k, v):
1708                 self._pkg.slot = v
1709
1710         def _set_use(self, k, v):
1711                 self._pkg.use = self._pkg._use(v.split())
1712
1713         def _set_counter(self, k, v):
1714                 if isinstance(v, basestring):
1715                         try:
1716                                 v = long(v.strip())
1717                         except ValueError:
1718                                 v = 0
1719                 self._pkg.counter = v
1720
1721         def _set__mtime_(self, k, v):
1722                 if isinstance(v, basestring):
1723                         try:
1724                                 v = long(v.strip())
1725                         except ValueError:
1726                                 v = 0
1727                 self._pkg.mtime = v
1728
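# Note (not part of the original module): assignments to wrapped keys keep the
# owning Package synchronized.  For a Package pkg, for example,
#
#       pkg.metadata["USE"] = "gtk nls"
#
# also sets pkg.use.enabled to frozenset(["gtk", "nls"]) via _set_use() above.
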
1729 class EbuildFetchonly(SlotObject):
1730
1731         __slots__ = ("fetch_all", "pkg", "pretend", "settings")
1732
1733         def execute(self):
1734                 settings = self.settings
1735                 pkg = self.pkg
1736                 portdb = pkg.root_config.trees["porttree"].dbapi
1737                 ebuild_path = portdb.findname(pkg.cpv)
1738                 settings.setcpv(pkg)
1739                 debug = settings.get("PORTAGE_DEBUG") == "1"
1740                 use_cache = 1 # always true
1741                 portage.doebuild_environment(ebuild_path, "fetch",
1742                         settings["ROOT"], settings, debug, use_cache, portdb)
1743                 restrict_fetch = 'fetch' in settings['PORTAGE_RESTRICT'].split()
1744
1745                 if restrict_fetch:
1746                         rval = self._execute_with_builddir()
1747                 else:
1748                         rval = portage.doebuild(ebuild_path, "fetch",
1749                                 settings["ROOT"], settings, debug=debug,
1750                                 listonly=self.pretend, fetchonly=1, fetchall=self.fetch_all,
1751                                 mydbapi=portdb, tree="porttree")
1752
1753                         if rval != os.EX_OK:
1754                                 msg = "Fetch failed for '%s'" % (pkg.cpv,)
1755                                 eerror(msg, phase="unpack", key=pkg.cpv)
1756
1757                 return rval
1758
1759         def _execute_with_builddir(self):
1760                 # Spawning pkg_nofetch requires PORTAGE_BUILDDIR, both to
1761                 # ensure a sane $PWD (bug #239560) and to store elog
1762                 # messages. Use a private temp directory in order
1763                 # to avoid locking the main one.
1764                 settings = self.settings
1765                 global_tmpdir = settings["PORTAGE_TMPDIR"]
1766                 from tempfile import mkdtemp
1767                 try:
1768                         private_tmpdir = mkdtemp("", "._portage_fetch_.", global_tmpdir)
1769                 except OSError, e:
1770                         if e.errno != portage.exception.PermissionDenied.errno:
1771                                 raise
1772                         raise portage.exception.PermissionDenied(global_tmpdir)
1773                 settings["PORTAGE_TMPDIR"] = private_tmpdir
1774                 settings.backup_changes("PORTAGE_TMPDIR")
1775                 try:
1776                         retval = self._execute()
1777                 finally:
1778                         settings["PORTAGE_TMPDIR"] = global_tmpdir
1779                         settings.backup_changes("PORTAGE_TMPDIR")
1780                         shutil.rmtree(private_tmpdir)
1781                 return retval
1782
1783         def _execute(self):
1784                 settings = self.settings
1785                 pkg = self.pkg
1786                 root_config = pkg.root_config
1787                 portdb = root_config.trees["porttree"].dbapi
1788                 ebuild_path = portdb.findname(pkg.cpv)
1789                 debug = settings.get("PORTAGE_DEBUG") == "1"
1790                 retval = portage.doebuild(ebuild_path, "fetch",
1791                         self.settings["ROOT"], self.settings, debug=debug,
1792                         listonly=self.pretend, fetchonly=1, fetchall=self.fetch_all,
1793                         mydbapi=portdb, tree="porttree")
1794
1795                 if retval != os.EX_OK:
1796                         msg = "Fetch failed for '%s'" % (pkg.cpv,)
1797                         eerror(msg, phase="unpack", key=pkg.cpv)
1798
1799                 portage.elog.elog_process(self.pkg.cpv, self.settings)
1800                 return retval
1801
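# Illustrative sketch, not part of the original module: EbuildFetchonly runs
# synchronously via execute(), so a --fetchonly code path might drive it
# roughly like this (pkg is a Package, settings a config instance).
def _fetchonly_sketch(pkg, settings, pretend, fetch_all):
        fetcher = EbuildFetchonly(fetch_all=fetch_all, pkg=pkg,
                pretend=pretend, settings=settings)
        return fetcher.execute()
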
1802 class PollConstants(object):
1803
1804         """
1805         Provides POLL* constants that are equivalent to those from the
1806         select module, for use by PollSelectAdapter.
1807         """
1808
1809         names = ("POLLIN", "POLLPRI", "POLLOUT", "POLLERR", "POLLHUP", "POLLNVAL")
1810         v = 1
1811         for k in names:
1812                 locals()[k] = getattr(select, k, v)
1813                 v *= 2
1814         del k, v
1815
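# Note (not part of the original module): where the select module provides the
# POLL* constants, PollConstants simply mirrors them; otherwise each name falls
# back to a distinct power of two, so bitwise event masks such as
# POLLIN | POLLHUP keep working either way.
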
1816 class AsynchronousTask(SlotObject):
1817         """
1818         Subclasses override _wait() and _poll() so that calls
1819         to public methods can be wrapped for implementing
1820         hooks such as exit listener notification.
1821
1822         Subclasses should call self.wait() to notify exit listeners after
1823         the task is complete and self.returncode has been set.
1824         """
1825
1826         __slots__ = ("background", "cancelled", "returncode") + \
1827                 ("_exit_listeners", "_exit_listener_stack", "_start_listeners")
1828
1829         def start(self):
1830                 """
1831                 Start an asynchronous task and then return as soon as possible.
1832                 """
1833                 self._start_hook()
1834                 self._start()
1835
1836         def _start(self):
1837                 raise NotImplementedError(self)
1838
1839         def isAlive(self):
1840                 return self.returncode is None
1841
1842         def poll(self):
1843                 self._wait_hook()
1844                 return self._poll()
1845
1846         def _poll(self):
1847                 return self.returncode
1848
1849         def wait(self):
1850                 if self.returncode is None:
1851                         self._wait()
1852                 self._wait_hook()
1853                 return self.returncode
1854
1855         def _wait(self):
1856                 return self.returncode
1857
1858         def cancel(self):
1859                 self.cancelled = True
1860                 self.wait()
1861
1862         def addStartListener(self, f):
1863                 """
1864                 The function will be called with one argument, a reference to self.
1865                 """
1866                 if self._start_listeners is None:
1867                         self._start_listeners = []
1868                 self._start_listeners.append(f)
1869
1870         def removeStartListener(self, f):
1871                 if self._start_listeners is None:
1872                         return
1873                 self._start_listeners.remove(f)
1874
1875         def _start_hook(self):
1876                 if self._start_listeners is not None:
1877                         start_listeners = self._start_listeners
1878                         self._start_listeners = None
1879
1880                         for f in start_listeners:
1881                                 f(self)
1882
1883         def addExitListener(self, f):
1884                 """
1885                 The function will be called with one argument, a reference to self.
1886                 """
1887                 if self._exit_listeners is None:
1888                         self._exit_listeners = []
1889                 self._exit_listeners.append(f)
1890
1891         def removeExitListener(self, f):
1892                 if self._exit_listeners is None:
1893                         if self._exit_listener_stack is not None:
1894                                 self._exit_listener_stack.remove(f)
1895                         return
1896                 self._exit_listeners.remove(f)
1897
1898         def _wait_hook(self):
1899                 """
1900                 Call this method after the task completes, just before returning
1901                 the returncode from wait() or poll(). This hook is
1902                 used to trigger exit listeners when the returncode first
1903                 becomes available.
1904                 """
1905                 if self.returncode is not None and \
1906                         self._exit_listeners is not None:
1907
1908                         # This prevents recursion, in case one of the
1909                         # exit handlers triggers this method again by
1910                         # calling wait(). Use a stack that gives
1911                         # removeExitListener() an opportunity to consume
1912                         # listeners from the stack, before they can get
1913                         # called below. This is necessary because a call
1914                         # to one exit listener may result in a call to
1915                         # removeExitListener() for another listener on
1916                         # the stack. That listener needs to be removed
1917                         # from the stack since it would be inconsistent
1918                         # to call it after it has been passed into
1919                         # removeExitListener().
1920                         self._exit_listener_stack = self._exit_listeners
1921                         self._exit_listeners = None
1922
1923                         self._exit_listener_stack.reverse()
1924                         while self._exit_listener_stack:
1925                                 self._exit_listener_stack.pop()(self)
1926
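# Illustrative sketch, not part of the original module: a minimal
# AsynchronousTask subclass only needs to set self.returncode and call
# self.wait() so that _wait_hook() notifies any registered exit listeners.
# A caller would then do: task = _NoopTaskSketch(); task.addExitListener(cb);
# task.start(), where cb is any one-argument callable.
class _NoopTaskSketch(AsynchronousTask):

        __slots__ = ()

        def _start(self):
                self.returncode = os.EX_OK
                self.wait()
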
1927 class AbstractPollTask(AsynchronousTask):
1928
1929         __slots__ = ("scheduler",) + \
1930                 ("_registered",)
1931
1932         _bufsize = 4096
1933         _exceptional_events = PollConstants.POLLERR | PollConstants.POLLNVAL
1934         _registered_events = PollConstants.POLLIN | PollConstants.POLLHUP | \
1935                 _exceptional_events
1936
1937         def _unregister(self):
1938                 raise NotImplementedError(self)
1939
1940         def _unregister_if_appropriate(self, event):
1941                 if self._registered:
1942                         if event & self._exceptional_events:
1943                                 self._unregister()
1944                                 self.cancel()
1945                         elif event & PollConstants.POLLHUP:
1946                                 self._unregister()
1947                                 self.wait()
1948
1949 class PipeReader(AbstractPollTask):
1950
1951         """
1952         Reads output from one or more files and saves it in memory,
1953         for retrieval via the getvalue() method. This is driven by
1954         the scheduler's poll() loop, so it runs entirely within the
1955         current process.
1956         """
1957
1958         __slots__ = ("input_files",) + \
1959                 ("_read_data", "_reg_ids")
1960
1961         def _start(self):
1962                 self._reg_ids = set()
1963                 self._read_data = []
1964                 for k, f in self.input_files.iteritems():
1965                         fcntl.fcntl(f.fileno(), fcntl.F_SETFL,
1966                                 fcntl.fcntl(f.fileno(), fcntl.F_GETFL) | os.O_NONBLOCK)
1967                         self._reg_ids.add(self.scheduler.register(f.fileno(),
1968                                 self._registered_events, self._output_handler))
1969                 self._registered = True
1970
1971         def isAlive(self):
1972                 return self._registered
1973
1974         def cancel(self):
1975                 if self.returncode is None:
1976                         self.returncode = 1
1977                         self.cancelled = True
1978                 self.wait()
1979
1980         def _wait(self):
1981                 if self.returncode is not None:
1982                         return self.returncode
1983
1984                 if self._registered:
1985                         self.scheduler.schedule(self._reg_ids)
1986                         self._unregister()
1987
1988                 self.returncode = os.EX_OK
1989                 return self.returncode
1990
1991         def getvalue(self):
1992                 """Retrieve the entire contents"""
1993                 if sys.hexversion >= 0x3000000:
1994                         return bytes().join(self._read_data)
1995                 return "".join(self._read_data)
1996
1997         def close(self):
1998                 """Free the memory buffer."""
1999                 self._read_data = None
2000
2001         def _output_handler(self, fd, event):
2002
2003                 if event & PollConstants.POLLIN:
2004
2005                         for f in self.input_files.itervalues():
2006                                 if fd == f.fileno():
2007                                         break
2008
2009                         buf = array.array('B')
2010                         try:
2011                                 buf.fromfile(f, self._bufsize)
2012                         except EOFError:
2013                                 pass
2014
2015                         if buf:
2016                                 self._read_data.append(buf.tostring())
2017                         else:
2018                                 self._unregister()
2019                                 self.wait()
2020
2021                 self._unregister_if_appropriate(event)
2022                 return self._registered
2023
2024         def _unregister(self):
2025                 """
2026                 Unregister from the scheduler and close open files.
2027                 """
2028
2029                 self._registered = False
2030
2031                 if self._reg_ids is not None:
2032                         for reg_id in self._reg_ids:
2033                                 self.scheduler.unregister(reg_id)
2034                         self._reg_ids = None
2035
2036                 if self.input_files is not None:
2037                         for f in self.input_files.itervalues():
2038                                 f.close()
2039                         self.input_files = None
2040
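# Illustrative sketch, not part of the original module: PipeReader collects
# everything written to the read end of a pipe until EOF.  The scheduler
# parameter is assumed to be the poll-loop scheduler used throughout this
# file (anything providing register(), schedule() and unregister()).
def _pipe_reader_sketch(scheduler, master_fd):
        reader = PipeReader(
                input_files={"pipe_read": os.fdopen(master_fd, 'rb')},
                scheduler=scheduler)
        reader.start()
        reader.wait()
        output = reader.getvalue()
        reader.close()
        return output
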
2041 class CompositeTask(AsynchronousTask):
2042
2043         __slots__ = ("scheduler",) + ("_current_task",)
2044
2045         def isAlive(self):
2046                 return self._current_task is not None
2047
2048         def cancel(self):
2049                 self.cancelled = True
2050                 if self._current_task is not None:
2051                         self._current_task.cancel()
2052
2053         def _poll(self):
2054                 """
2055                 This does a loop calling self._current_task.poll()
2056                 repeatedly as long as the value of self._current_task
2057                 keeps changing. It calls poll() a maximum of one time
2058                 for a given self._current_task instance. This is useful
2059                 since calling poll() on a task can trigger advancement to
2060                 the next task, which can eventually lead to the returncode
2061                 being set in cases where polling only a single task would
2062                 not have the same effect.
2063                 """
2064
2065                 prev = None
2066                 while True:
2067                         task = self._current_task
2068                         if task is None or task is prev:
2069                                 # don't poll the same task more than once
2070                                 break
2071                         task.poll()
2072                         prev = task
2073
2074                 return self.returncode
2075
2076         def _wait(self):
2077
2078                 prev = None
2079                 while True:
2080                         task = self._current_task
2081                         if task is None:
2082                                 # don't wait for the same task more than once
2083                                 break
2084                         if task is prev:
2085                                 # Before the task.wait() method returned, an exit
2086                                 # listener should have set self._current_task to either
2087                                 # a different task or None. Something is wrong.
2088                                 raise AssertionError("self._current_task has not " + \
2089                                         "changed since calling wait", self, task)
2090                         task.wait()
2091                         prev = task
2092
2093                 return self.returncode
2094
2095         def _assert_current(self, task):
2096                 """
2097                 Raises an AssertionError if the given task is not the
2098                 same one as self._current_task. This can be useful
2099                 for detecting bugs.
2100                 """
2101                 if task is not self._current_task:
2102                         raise AssertionError("Unrecognized task: %s" % (task,))
2103
2104         def _default_exit(self, task):
2105                 """
2106                 Calls _assert_current() on the given task and then sets the
2107                 composite returncode attribute if task.returncode != os.EX_OK.
2108                 If the task failed then self._current_task will be set to None.
2109                 Subclasses can use this as a generic task exit callback.
2110
2111                 @rtype: int
2112                 @returns: The task.returncode attribute.
2113                 """
2114                 self._assert_current(task)
2115                 if task.returncode != os.EX_OK:
2116                         self.returncode = task.returncode
2117                         self._current_task = None
2118                 return task.returncode
2119
2120         def _final_exit(self, task):
2121                 """
2122                 Assumes that task is the final task of this composite task.
2123                 Calls _default_exit() and sets self.returncode to the task's
2124                 returncode and sets self._current_task to None.
2125                 """
2126                 self._default_exit(task)
2127                 self._current_task = None
2128                 self.returncode = task.returncode
2129                 return self.returncode
2130
2131         def _default_final_exit(self, task):
2132                 """
2133                 This calls _final_exit() and then wait().
2134
2135                 Subclasses can use this as a generic final task exit callback.
2136
2137                 """
2138                 self._final_exit(task)
2139                 return self.wait()
2140
2141         def _start_task(self, task, exit_handler):
2142                 """
2143                 Register exit handler for the given task, set it
2144                 as self._current_task, and call task.start().
2145
2146                 Subclasses can use this as a generic way to start
2147                 a task.
2148
2149                 """
2150                 task.addExitListener(exit_handler)
2151                 self._current_task = task
2152                 task.start()
2153
2154 class TaskSequence(CompositeTask):
2155         """
2156         A collection of tasks that executes sequentially. Each task
2157         must have an addExitListener() method that can be used as
2158         a means to trigger movement from one task to the next.
2159         """
2160
2161         __slots__ = ("_task_queue",)
2162
2163         def __init__(self, **kwargs):
2164                 AsynchronousTask.__init__(self, **kwargs)
2165                 self._task_queue = deque()
2166
2167         def add(self, task):
2168                 self._task_queue.append(task)
2169
2170         def _start(self):
2171                 self._start_next_task()
2172
2173         def cancel(self):
2174                 self._task_queue.clear()
2175                 CompositeTask.cancel(self)
2176
2177         def _start_next_task(self):
2178                 self._start_task(self._task_queue.popleft(),
2179                         self._task_exit_handler)
2180
2181         def _task_exit_handler(self, task):
2182                 if self._default_exit(task) != os.EX_OK:
2183                         self.wait()
2184                 elif self._task_queue:
2185                         self._start_next_task()
2186                 else:
2187                         self._final_exit(task)
2188                         self.wait()
2189
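# Illustrative sketch, not part of the original module: tasks appended with
# add() run back to back, and the sequence's own exit listeners fire once the
# last task (or the first failing one) has finished.
def _task_sequence_sketch(scheduler, tasks, on_exit):
        seq = TaskSequence(scheduler=scheduler)
        for task in tasks:
                seq.add(task)
        seq.addExitListener(on_exit)
        seq.start()
        return seq
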
2190 class SubProcess(AbstractPollTask):
2191
2192         __slots__ = ("pid",) + \
2193                 ("_files", "_reg_id")
2194
2195         # A file descriptor is required for the scheduler to monitor changes from
2196         # inside a poll() loop. When logging is not enabled, create a pipe just to
2197         # serve this purpose alone.
2198         _dummy_pipe_fd = 9
2199
2200         def _poll(self):
2201                 if self.returncode is not None:
2202                         return self.returncode
2203                 if self.pid is None:
2204                         return self.returncode
2205                 if self._registered:
2206                         return self.returncode
2207
2208                 try:
2209                         retval = os.waitpid(self.pid, os.WNOHANG)
2210                 except OSError, e:
2211                         if e.errno != errno.ECHILD:
2212                                 raise
2213                         del e
2214                         retval = (self.pid, 1)
2215
2216                 if retval == (0, 0):
2217                         return None
2218                 self._set_returncode(retval)
2219                 return self.returncode
2220
2221         def cancel(self):
2222                 if self.isAlive():
2223                         try:
2224                                 os.kill(self.pid, signal.SIGTERM)
2225                         except OSError, e:
2226                                 if e.errno != errno.ESRCH:
2227                                         raise
2228                                 del e
2229
2230                 self.cancelled = True
2231                 if self.pid is not None:
2232                         self.wait()
2233                 return self.returncode
2234
2235         def isAlive(self):
2236                 return self.pid is not None and \
2237                         self.returncode is None
2238
2239         def _wait(self):
2240
2241                 if self.returncode is not None:
2242                         return self.returncode
2243
2244                 if self._registered:
2245                         self.scheduler.schedule(self._reg_id)
2246                         self._unregister()
2247                         if self.returncode is not None:
2248                                 return self.returncode
2249
2250                 try:
2251                         wait_retval = os.waitpid(self.pid, 0)
2252                 except OSError, e:
2253                         if e.errno != errno.ECHILD:
2254                                 raise
2255                         del e
2256                         self._set_returncode((self.pid, 1))
2257                 else:
2258                         self._set_returncode(wait_retval)
2259
2260                 return self.returncode
2261
2262         def _unregister(self):
2263                 """
2264                 Unregister from the scheduler and close open files.
2265                 """
2266
2267                 self._registered = False
2268
2269                 if self._reg_id is not None:
2270                         self.scheduler.unregister(self._reg_id)
2271                         self._reg_id = None
2272
2273                 if self._files is not None:
2274                         for f in self._files.itervalues():
2275                                 f.close()
2276                         self._files = None
2277
2278         def _set_returncode(self, wait_retval):
2279
2280                 retval = wait_retval[1]
2281
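                # wait_retval is the (pid, status) pair from os.waitpid(): the
                # low byte of status holds the terminating signal (if any) and
                # the high byte holds the exit code, so both cases are folded
                # into a single nonzero returncode below.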
2282                 if retval != os.EX_OK:
2283                         if retval & 0xff:
2284                                 retval = (retval & 0xff) << 8
2285                         else:
2286                                 retval = retval >> 8
2287
2288                 self.returncode = retval
2289
2290 class SpawnProcess(SubProcess):
2291
2292         """
2293         Constructor keyword args are passed into portage.process.spawn().
2294         The required "args" keyword argument will be passed as the first
2295         spawn() argument.
2296         """
2297
2298         _spawn_kwarg_names = ("env", "opt_name", "fd_pipes",
2299                 "uid", "gid", "groups", "umask", "logfile",
2300                 "path_lookup", "pre_exec")
2301
2302         __slots__ = ("args",) + \
2303                 _spawn_kwarg_names
2304
2305         _file_names = ("log", "process", "stdout")
2306         _files_dict = slot_dict_class(_file_names, prefix="")
2307
2308         def _start(self):
2309
2310                 if self.cancelled:
2311                         return
2312
2313                 if self.fd_pipes is None:
2314                         self.fd_pipes = {}
2315                 fd_pipes = self.fd_pipes
2316                 fd_pipes.setdefault(0, sys.stdin.fileno())
2317                 fd_pipes.setdefault(1, sys.stdout.fileno())
2318                 fd_pipes.setdefault(2, sys.stderr.fileno())
2319
2320                 # flush any pending output
2321                 for fd in fd_pipes.itervalues():
2322                         if fd == sys.stdout.fileno():
2323                                 sys.stdout.flush()
2324                         if fd == sys.stderr.fileno():
2325                                 sys.stderr.flush()
2326
2327                 logfile = self.logfile
2328                 self._files = self._files_dict()
2329                 files = self._files
2330
2331                 master_fd, slave_fd = self._pipe(fd_pipes)
2332                 fcntl.fcntl(master_fd, fcntl.F_SETFL,
2333                         fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
2334
2335                 null_input = None
2336                 fd_pipes_orig = fd_pipes.copy()
2337                 if self.background:
2338                         # TODO: Use job control functions like tcsetpgrp() to control
2339                         # access to stdin. Until then, use /dev/null so that any
2340                         # attempts to read from stdin will immediately return EOF
2341                         # instead of blocking indefinitely.
2342                         null_input = open('/dev/null', 'rb')
2343                         fd_pipes[0] = null_input.fileno()
2344                 else:
2345                         fd_pipes[0] = fd_pipes_orig[0]
2346
2347                 files.process = os.fdopen(master_fd, 'rb')
2348                 if logfile is not None:
2349
2350                         fd_pipes[1] = slave_fd
2351                         fd_pipes[2] = slave_fd
2352
2353                         files.log = open(logfile, mode='ab')
2354                         portage.util.apply_secpass_permissions(logfile,
2355                                 uid=portage.portage_uid, gid=portage.portage_gid,
2356                                 mode=0660)
2357
2358                         if not self.background:
2359                                 files.stdout = os.fdopen(os.dup(fd_pipes_orig[1]), 'wb')
2360
2361                         output_handler = self._output_handler
2362
2363                 else:
2364
2365                         # Create a dummy pipe so the scheduler can monitor
2366                         # the process from inside a poll() loop.
2367                         fd_pipes[self._dummy_pipe_fd] = slave_fd
2368                         if self.background:
2369                                 fd_pipes[1] = slave_fd
2370                                 fd_pipes[2] = slave_fd
2371                         output_handler = self._dummy_handler
2372
2373                 kwargs = {}
2374                 for k in self._spawn_kwarg_names:
2375                         v = getattr(self, k)
2376                         if v is not None:
2377                                 kwargs[k] = v
2378
2379                 kwargs["fd_pipes"] = fd_pipes
2380                 kwargs["returnpid"] = True
2381                 kwargs.pop("logfile", None)
2382
2383                 self._reg_id = self.scheduler.register(files.process.fileno(),
2384                         self._registered_events, output_handler)
2385                 self._registered = True
2386
2387                 retval = self._spawn(self.args, **kwargs)
2388
2389                 os.close(slave_fd)
2390                 if null_input is not None:
2391                         null_input.close()
2392
2393                 if isinstance(retval, int):
2394                         # spawn failed
2395                         self._unregister()
2396                         self.returncode = retval
2397                         self.wait()
2398                         return
2399
2400                 self.pid = retval[0]
2401                 portage.process.spawned_pids.remove(self.pid)
2402
2403         def _pipe(self, fd_pipes):
2404                 """
2405                 @type fd_pipes: dict
2406                 @param fd_pipes: pipes from which to copy terminal size if desired.
2407                 """
2408                 return os.pipe()
2409
2410         def _spawn(self, args, **kwargs):
2411                 return portage.process.spawn(args, **kwargs)
2412
2413         def _output_handler(self, fd, event):
2414
2415                 if event & PollConstants.POLLIN:
2416
2417                         files = self._files
2418                         buf = array.array('B')
2419                         try:
2420                                 buf.fromfile(files.process, self._bufsize)
2421                         except EOFError:
2422                                 pass
2423
2424                         if buf:
2425                                 if not self.background:
2426                                         buf.tofile(files.stdout)
2427                                         files.stdout.flush()
2428                                 buf.tofile(files.log)
2429                                 files.log.flush()
2430                         else:
2431                                 self._unregister()
2432                                 self.wait()
2433
2434                 self._unregister_if_appropriate(event)
2435                 return self._registered
2436
2437         def _dummy_handler(self, fd, event):
2438                 """
2439                 This method is mainly interested in detecting EOF, since
2440                 the only purpose of the pipe is to allow the scheduler to
2441                 monitor the process from inside a poll() loop.
2442                 """
2443
2444                 if event & PollConstants.POLLIN:
2445
2446                         buf = array.array('B')
2447                         try:
2448                                 buf.fromfile(self._files.process, self._bufsize)
2449                         except EOFError:
2450                                 pass
2451
2452                         if buf:
2453                                 pass
2454                         else:
2455                                 self._unregister()
2456                                 self.wait()
2457
2458                 self._unregister_if_appropriate(event)
2459                 return self._registered
2460
2461 class MiscFunctionsProcess(SpawnProcess):
2462         """
2463         Spawns misc-functions.sh with an existing ebuild environment.
2464         """
2465
2466         __slots__ = ("commands", "phase", "pkg", "settings")
2467
2468         def _start(self):
2469                 settings = self.settings
2470                 settings.pop("EBUILD_PHASE", None)
2471                 portage_bin_path = settings["PORTAGE_BIN_PATH"]
2472                 misc_sh_binary = os.path.join(portage_bin_path,
2473                         os.path.basename(portage.const.MISC_SH_BINARY))
2474
2475                 self.args = [portage._shell_quote(misc_sh_binary)] + self.commands
2476                 self.logfile = settings.get("PORTAGE_LOG_FILE")
2477
2478                 portage._doebuild_exit_status_unlink(
2479                         settings.get("EBUILD_EXIT_STATUS_FILE"))
2480
2481                 SpawnProcess._start(self)
2482
2483         def _spawn(self, args, **kwargs):
2484                 settings = self.settings
2485                 debug = settings.get("PORTAGE_DEBUG") == "1"
2486                 return portage.spawn(" ".join(args), settings,
2487                         debug=debug, **kwargs)
2488
2489         def _set_returncode(self, wait_retval):
2490                 SpawnProcess._set_returncode(self, wait_retval)
2491                 self.returncode = portage._doebuild_exit_status_check_and_log(
2492                         self.settings, self.phase, self.returncode)
2493
2494 class EbuildFetcher(SpawnProcess):
2495
2496         __slots__ = ("config_pool", "fetchonly", "fetchall", "pkg", "prefetch") + \
2497                 ("_build_dir",)
2498
2499         def _start(self):
2500
2501                 root_config = self.pkg.root_config
2502                 portdb = root_config.trees["porttree"].dbapi
2503                 ebuild_path = portdb.findname(self.pkg.cpv)
2504                 settings = self.config_pool.allocate()
2505                 settings.setcpv(self.pkg)
2506
2507                 # In prefetch mode, logging goes to emerge-fetch.log and the builddir
2508                 # should not be touched since otherwise it could interfere with
2509                 # another instance of the same cpv concurrently being built for a
2510                 # different $ROOT (currently, builds only cooperate with prefetchers
2511                 # that are spawned for the same $ROOT).
2512                 if not self.prefetch:
2513                         self._build_dir = EbuildBuildDir(pkg=self.pkg, settings=settings)
2514                         self._build_dir.lock()
2515                         self._build_dir.clean_log()
2516                         portage.prepare_build_dirs(self.pkg.root, self._build_dir.settings, 0)
2517                         if self.logfile is None:
2518                                 self.logfile = settings.get("PORTAGE_LOG_FILE")
2519
2520                 phase = "fetch"
2521                 if self.fetchall:
2522                         phase = "fetchall"
2523
2524                 # If any incremental variables have been overridden
2525                 # via the environment, those values need to be passed
2526                 # along here so that they are correctly considered by
2527                 # the config instance in the subprocess.
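                     # (Illustrative: incremental variables here are things like
                     # USE or FEATURES exported in the calling shell; copying
                     # os.environ lets the child's config see the same overrides.)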
2528                 fetch_env = os.environ.copy()
2529
2530                 nocolor = settings.get("NOCOLOR")
2531                 if nocolor is not None:
2532                         fetch_env["NOCOLOR"] = nocolor
2533
2534                 fetch_env["PORTAGE_NICENESS"] = "0"
2535                 if self.prefetch:
2536                         fetch_env["PORTAGE_PARALLEL_FETCHONLY"] = "1"
2537
2538                 ebuild_binary = os.path.join(
2539                         settings["PORTAGE_BIN_PATH"], "ebuild")
2540
2541                 fetch_args = [ebuild_binary, ebuild_path, phase]
2542                 debug = settings.get("PORTAGE_DEBUG") == "1"
2543                 if debug:
2544                         fetch_args.append("--debug")
2545
2546                 self.args = fetch_args
2547                 self.env = fetch_env
2548                 SpawnProcess._start(self)
2549
2550         def _pipe(self, fd_pipes):
2551                 """When appropriate, use a pty so that fetcher progress bars,
2552                 such as the one wget displays, work properly."""
2553                 if self.background or not sys.stdout.isatty():
2554                         # When the output only goes to a log file,
2555                         # there's no point in creating a pty.
2556                         return os.pipe()
2557                 stdout_pipe = fd_pipes.get(1)
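                     # _create_pty_or_pipe presumably falls back to a plain pipe
                     # when a pty cannot be allocated; the got_pty flag is not
                     # needed here and is ignored.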
2558                 got_pty, master_fd, slave_fd = \
2559                         portage._create_pty_or_pipe(copy_term_size=stdout_pipe)
2560                 return (master_fd, slave_fd)
2561
2562         def _set_returncode(self, wait_retval):
2563                 SpawnProcess._set_returncode(self, wait_retval)
2564                 # Collect elog messages that might have been
2565                 # created by the pkg_nofetch phase.
2566                 if self._build_dir is not None:
2567                         # Skip elog messages for prefetch, in order to avoid duplicates.
2568                         if not self.prefetch and self.returncode != os.EX_OK:
2569                                 elog_out = None
2570                                 if self.logfile is not None:
2571                                         if self.background:
2572                                                 elog_out = open(self.logfile, 'a')
2573                                 msg = "Fetch failed for '%s'" % (self.pkg.cpv,)
2574                                 if self.logfile is not None:
2575                                         msg += ", Log file:"
2576                                 eerror(msg, phase="unpack", key=self.pkg.cpv, out=elog_out)
2577                                 if self.logfile is not None:
2578                                         eerror(" '%s'" % (self.logfile,),
2579                                                 phase="unpack", key=self.pkg.cpv, out=elog_out)
2580                                 if elog_out is not None:
2581                                         elog_out.close()
2582                         if not self.prefetch:
2583                                 portage.elog.elog_process(self.pkg.cpv, self._build_dir.settings)
2584                         features = self._build_dir.settings.features
2585                         if self.returncode == os.EX_OK:
2586                                 self._build_dir.clean_log()
2587                         self._build_dir.unlock()
2588                         self.config_pool.deallocate(self._build_dir.settings)
2589                         self._build_dir = None
2590
2591 class EbuildBuildDir(SlotObject):
2592
2593         __slots__ = ("dir_path", "pkg", "settings",
2594                 "locked", "_catdir", "_lock_obj")
2595
2596         def __init__(self, **kwargs):
2597                 SlotObject.__init__(self, **kwargs)
2598                 self.locked = False
2599
2600         def lock(self):
2601                 """
2602                 This raises an AlreadyLocked exception if lock() is called
2603                 while a lock is already held. To avoid this, call unlock()
2604                 first, or check the "locked" attribute before calling
2605                 lock() again.
2606                 """
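                     # Illustrative usage sketch (not executed here); it mirrors
                     # how callers elsewhere in this file use the class:
                     #
                     #     build_dir = EbuildBuildDir(pkg=pkg, settings=settings)
                     #     if not build_dir.locked:
                     #         build_dir.lock()
                     #     try:
                     #         ...  # work inside PORTAGE_BUILDDIR
                     #     finally:
                     #         build_dir.unlock()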
2607                 if self._lock_obj is not None:
2608                         raise self.AlreadyLocked((self._lock_obj,))
2609
2610                 dir_path = self.dir_path
2611                 if dir_path is None:
2612                         root_config = self.pkg.root_config
2613                         portdb = root_config.trees["porttree"].dbapi
2614                         ebuild_path = portdb.findname(self.pkg.cpv)
2615                         settings = self.settings
2616                         settings.setcpv(self.pkg)
2617                         debug = settings.get("PORTAGE_DEBUG") == "1"
2618                         use_cache = 1 # always true
2619                         portage.doebuild_environment(ebuild_path, "setup", root_config.root,
2620                                 self.settings, debug, use_cache, portdb)
2621                         dir_path = self.settings["PORTAGE_BUILDDIR"]
2622
2623                 catdir = os.path.dirname(dir_path)
2624                 self._catdir = catdir
2625
2626                 portage.util.ensure_dirs(os.path.dirname(catdir),
2627                         gid=portage.portage_gid,
2628                         mode=070, mask=0)
2629                 catdir_lock = None
2630                 try:
2631                         catdir_lock = portage.locks.lockdir(catdir)
2632                         portage.util.ensure_dirs(catdir,
2633                                 gid=portage.portage_gid,
2634                                 mode=070, mask=0)
2635                         self._lock_obj = portage.locks.lockdir(dir_path)
2636                 finally:
2637                         self.locked = self._lock_obj is not None
2638                         if catdir_lock is not None:
2639                                 portage.locks.unlockdir(catdir_lock)
2640
2641         def clean_log(self):
2642                 """Discard existing log."""
2643                 settings = self.settings
2644
2645                 for x in ('.logid', 'temp/build.log'):
2646                         try:
2647                                 os.unlink(os.path.join(settings["PORTAGE_BUILDDIR"], x))
2648                         except OSError:
2649                                 pass
2650
2651         def unlock(self):
2652                 if self._lock_obj is None:
2653                         return
2654
2655                 portage.locks.unlockdir(self._lock_obj)
2656                 self._lock_obj = None
2657                 self.locked = False
2658
2659                 catdir = self._catdir
2660                 catdir_lock = None
2661                 try:
2662                         catdir_lock = portage.locks.lockdir(catdir)
2663                 finally:
2664                         if catdir_lock:
2665                                 try:
2666                                         os.rmdir(catdir)
2667                                 except OSError, e:
2668                                         if e.errno not in (errno.ENOENT,
2669                                                 errno.ENOTEMPTY, errno.EEXIST):
2670                                                 raise
2671                                         del e
2672                                 portage.locks.unlockdir(catdir_lock)
2673
2674         class AlreadyLocked(portage.exception.PortageException):
2675                 pass
2676
2677 class EbuildBuild(CompositeTask):
2678
2679         __slots__ = ("args_set", "config_pool", "find_blockers",
2680                 "ldpath_mtimes", "logger", "opts", "pkg", "pkg_count",
2681                 "prefetcher", "settings", "world_atom") + \
2682                 ("_build_dir", "_buildpkg", "_ebuild_path", "_issyspkg", "_tree")
2683
2684         def _start(self):
2685
2686                 logger = self.logger
2687                 opts = self.opts
2688                 pkg = self.pkg
2689                 settings = self.settings
2690                 world_atom = self.world_atom
2691                 root_config = pkg.root_config
2692                 tree = "porttree"
2693                 self._tree = tree
2694                 portdb = root_config.trees[tree].dbapi
2695                 settings.setcpv(pkg)
2696                 settings.configdict["pkg"]["EMERGE_FROM"] = pkg.type_name
2697                 ebuild_path = portdb.findname(self.pkg.cpv)
2698                 self._ebuild_path = ebuild_path
2699
2700                 prefetcher = self.prefetcher
2701                 if prefetcher is None:
2702                         pass
2703                 elif not prefetcher.isAlive():
2704                         prefetcher.cancel()
2705                 elif prefetcher.poll() is None:
2706
2707                         waiting_msg = "Fetching files " + \
2708                                 "in the background. " + \
2709                                 "To view fetch progress, run `tail -f " + \
2710                                 "/var/log/emerge-fetch.log` in another " + \
2711                                 "terminal."
2712                         msg_prefix = colorize("GOOD", " * ")
2713                         from textwrap import wrap
2714                         waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
2715                                 for line in wrap(waiting_msg, 65))
2716                         if not self.background:
2717                                 writemsg(waiting_msg, noiselevel=-1)
2718
2719                         self._current_task = prefetcher
2720                         prefetcher.addExitListener(self._prefetch_exit)
2721                         return
2722
2723                 self._prefetch_exit(prefetcher)
2724
2725         def _prefetch_exit(self, prefetcher):
2726
2727                 opts = self.opts
2728                 pkg = self.pkg
2729                 settings = self.settings
2730
2731                 if opts.fetchonly:
2732                         fetcher = EbuildFetchonly(
2733                                 fetch_all=opts.fetch_all_uri,
2734                                 pkg=pkg, pretend=opts.pretend,
2735                                 settings=settings)
2736                         retval = fetcher.execute()
2737                         self.returncode = retval
2738                         self.wait()
2739                         return
2740
2741                 fetcher = EbuildFetcher(config_pool=self.config_pool,
2742                         fetchall=opts.fetch_all_uri,
2743                         fetchonly=opts.fetchonly,
2744                         background=self.background,
2745                         pkg=pkg, scheduler=self.scheduler)
2746
2747                 self._start_task(fetcher, self._fetch_exit)
2748
2749         def _fetch_exit(self, fetcher):
2750                 opts = self.opts
2751                 pkg = self.pkg
2752
2753                 fetch_failed = False
2754                 if opts.fetchonly:
2755                         fetch_failed = self._final_exit(fetcher) != os.EX_OK
2756                 else:
2757                         fetch_failed = self._default_exit(fetcher) != os.EX_OK
2758
2759                 if fetch_failed and fetcher.logfile is not None and \
2760                         os.path.exists(fetcher.logfile):
2761                         self.settings["PORTAGE_LOG_FILE"] = fetcher.logfile
2762
2763                 if not fetch_failed and fetcher.logfile is not None:
2764                         # Fetch was successful, so remove the fetch log.
2765                         try:
2766                                 os.unlink(fetcher.logfile)
2767                         except OSError:
2768                                 pass
2769
2770                 if fetch_failed or opts.fetchonly:
2771                         self.wait()
2772                         return
2773
2774                 logger = self.logger
2775                 opts = self.opts
2776                 pkg_count = self.pkg_count
2777                 scheduler = self.scheduler
2778                 settings = self.settings
2779                 features = settings.features
2780                 ebuild_path = self._ebuild_path
2781                 system_set = pkg.root_config.sets["system"]
2782
2783                 self._build_dir = EbuildBuildDir(pkg=pkg, settings=settings)
2784                 self._build_dir.lock()
2785
2786                 # Cleaning is triggered before the setup
2787                 # phase, in portage.doebuild().
2788                 msg = " === (%s of %s) Cleaning (%s::%s)" % \
2789                         (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2790                 short_msg = "emerge: (%s of %s) %s Clean" % \
2791                         (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2792                 logger.log(msg, short_msg=short_msg)
2793
2794                 #buildsyspkg: Check if we need to _force_ binary package creation
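                     # i.e. FEATURES contains "buildsyspkg", the package belongs
                     # to the system set, and the --buildpkg option was not
                     # already requested.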
2795                 self._issyspkg = "buildsyspkg" in features and \
2796                                 system_set.findAtomForPackage(pkg) and \
2797                                 not opts.buildpkg
2798
2799                 if opts.buildpkg or self._issyspkg:
2800
2801                         self._buildpkg = True
2802
2803                         msg = " === (%s of %s) Compiling/Packaging (%s::%s)" % \
2804                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2805                         short_msg = "emerge: (%s of %s) %s Compile" % \
2806                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2807                         logger.log(msg, short_msg=short_msg)
2808
2809                 else:
2810                         msg = " === (%s of %s) Compiling/Merging (%s::%s)" % \
2811                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2812                         short_msg = "emerge: (%s of %s) %s Compile" % \
2813                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2814                         logger.log(msg, short_msg=short_msg)
2815
2816                 build = EbuildExecuter(background=self.background, pkg=pkg,
2817                         scheduler=scheduler, settings=settings)
2818                 self._start_task(build, self._build_exit)
2819
2820         def _unlock_builddir(self):
2821                 portage.elog.elog_process(self.pkg.cpv, self.settings)
2822                 self._build_dir.unlock()
2823
2824         def _build_exit(self, build):
2825                 if self._default_exit(build) != os.EX_OK:
2826                         self._unlock_builddir()
2827                         self.wait()
2828                         return
2829
2830                 opts = self.opts
2831                 buildpkg = self._buildpkg
2832
2833                 if not buildpkg:
2834                         self._final_exit(build)
2835                         self.wait()
2836                         return
2837
2838                 if self._issyspkg:
2839                         msg = ">>> This is a system package, " + \
2840                                 "let's pack a rescue tarball.\n"
2841
2842                         log_path = self.settings.get("PORTAGE_LOG_FILE")
2843                         if log_path is not None:
2844                                 log_file = open(log_path, 'a')
2845                                 try:
2846                                         log_file.write(msg)
2847                                 finally:
2848                                         log_file.close()
2849
2850                         if not self.background:
2851                                 portage.writemsg_stdout(msg, noiselevel=-1)
2852
2853                 packager = EbuildBinpkg(background=self.background, pkg=self.pkg,
2854                         scheduler=self.scheduler, settings=self.settings)
2855
2856                 self._start_task(packager, self._buildpkg_exit)
2857
2858         def _buildpkg_exit(self, packager):
2859                 """
2860                 Release the build dir lock when there is a failure or
2861                 when in buildpkgonly mode. Otherwise, the lock will
2862                 be released when merge() is called.
2863                 """
2864
2865                 if self._default_exit(packager) != os.EX_OK:
2866                         self._unlock_builddir()
2867                         self.wait()
2868                         return
2869
2870                 if self.opts.buildpkgonly:
2871                         # Need to call "clean" phase for buildpkgonly mode
2872                         portage.elog.elog_process(self.pkg.cpv, self.settings)
2873                         phase = "clean"
2874                         clean_phase = EbuildPhase(background=self.background,
2875                                 pkg=self.pkg, phase=phase,
2876                                 scheduler=self.scheduler, settings=self.settings,
2877                                 tree=self._tree)
2878                         self._start_task(clean_phase, self._clean_exit)
2879                         return
2880
2881                 # Continue holding the builddir lock until
2882                 # after the package has been installed.
2883                 self._current_task = None
2884                 self.returncode = packager.returncode
2885                 self.wait()
2886
2887         def _clean_exit(self, clean_phase):
2888                 if self._final_exit(clean_phase) != os.EX_OK or \
2889                         self.opts.buildpkgonly:
2890                         self._unlock_builddir()
2891                 self.wait()
2892
2893         def install(self):
2894                 """
2895                 Install the package and then clean up and release locks.
2896                 Only call this after the build has completed successfully
2897                 and neither fetchonly nor buildpkgonly mode is enabled.
2898                 """
2899
2900                 find_blockers = self.find_blockers
2901                 ldpath_mtimes = self.ldpath_mtimes
2902                 logger = self.logger
2903                 pkg = self.pkg
2904                 pkg_count = self.pkg_count
2905                 settings = self.settings
2906                 world_atom = self.world_atom
2907                 ebuild_path = self._ebuild_path
2908                 tree = self._tree
2909
2910                 merge = EbuildMerge(find_blockers=self.find_blockers,
2911                         ldpath_mtimes=ldpath_mtimes, logger=logger, pkg=pkg,
2912                         pkg_count=pkg_count, pkg_path=ebuild_path,
2913                         scheduler=self.scheduler,
2914                         settings=settings, tree=tree, world_atom=world_atom)
2915
2916                 msg = " === (%s of %s) Merging (%s::%s)" % \
2917                         (pkg_count.curval, pkg_count.maxval,
2918                         pkg.cpv, ebuild_path)
2919                 short_msg = "emerge: (%s of %s) %s Merge" % \
2920                         (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2921                 logger.log(msg, short_msg=short_msg)
2922
2923                 try:
2924                         rval = merge.execute()
2925                 finally:
2926                         self._unlock_builddir()
2927
2928                 return rval
2929
2930 class EbuildExecuter(CompositeTask):
2931
2932         __slots__ = ("pkg", "scheduler", "settings") + ("_tree",)
2933
2934         _phases = ("prepare", "configure", "compile", "test", "install")
2935
2936         _live_eclasses = frozenset([
2937                 "bzr",
2938                 "cvs",
2939                 "darcs",
2940                 "git",
2941                 "mercurial",
2942                 "subversion"
2943         ])
2944
2945         def _start(self):
2946                 self._tree = "porttree"
2947                 pkg = self.pkg
2948                 phase = "clean"
2949                 clean_phase = EbuildPhase(background=self.background, pkg=pkg, phase=phase,
2950                         scheduler=self.scheduler, settings=self.settings, tree=self._tree)
2951                 self._start_task(clean_phase, self._clean_phase_exit)
2952
2953         def _clean_phase_exit(self, clean_phase):
2954
2955                 if self._default_exit(clean_phase) != os.EX_OK:
2956                         self.wait()
2957                         return
2958
2959                 pkg = self.pkg
2960                 scheduler = self.scheduler
2961                 settings = self.settings
2962                 cleanup = 1
2963
2964                 # This initializes PORTAGE_LOG_FILE.
2965                 portage.prepare_build_dirs(pkg.root, settings, cleanup)
2966
2967                 setup_phase = EbuildPhase(background=self.background,
2968                         pkg=pkg, phase="setup", scheduler=scheduler,
2969                         settings=settings, tree=self._tree)
2970
2971                 setup_phase.addExitListener(self._setup_exit)
2972                 self._current_task = setup_phase
2973                 self.scheduler.scheduleSetup(setup_phase)
2974
2975         def _setup_exit(self, setup_phase):
2976
2977                 if self._default_exit(setup_phase) != os.EX_OK:
2978                         self.wait()
2979                         return
2980
2981                 unpack_phase = EbuildPhase(background=self.background,
2982                         pkg=self.pkg, phase="unpack", scheduler=self.scheduler,
2983                         settings=self.settings, tree=self._tree)
2984
2985                 if self._live_eclasses.intersection(self.pkg.inherited):
2986                         # Serialize $DISTDIR access for live ebuilds since
2987                         # otherwise they can interfere with each other.
2988
2989                         unpack_phase.addExitListener(self._unpack_exit)
2990                         self._current_task = unpack_phase
2991                         self.scheduler.scheduleUnpack(unpack_phase)
2992
2993                 else:
2994                         self._start_task(unpack_phase, self._unpack_exit)
2995
2996         def _unpack_exit(self, unpack_phase):
2997
2998                 if self._default_exit(unpack_phase) != os.EX_OK:
2999                         self.wait()
3000                         return
3001
3002                 ebuild_phases = TaskSequence(scheduler=self.scheduler)
3003
3004                 pkg = self.pkg
3005                 phases = self._phases
3006                 eapi = pkg.metadata["EAPI"]
3007                 if eapi in ("0", "1"):
3008                         # skip src_prepare and src_configure
3009                         phases = phases[2:]
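                             # (slicing off "prepare" and "configure", the first
                             # two entries of self._phases)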
3010
3011                 for phase in phases:
3012                         ebuild_phases.add(EbuildPhase(background=self.background,
3013                                 pkg=self.pkg, phase=phase, scheduler=self.scheduler,
3014                                 settings=self.settings, tree=self._tree))
3015
3016                 self._start_task(ebuild_phases, self._default_final_exit)
3017
3018 class EbuildMetadataPhase(SubProcess):
3019
3020         """
3021         Asynchronous interface for the ebuild "depend" phase which is
3022         used to extract metadata from the ebuild.
3023         """
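             # As implemented below: doebuild() is invoked for the "depend"
             # phase with an extra pipe on fd 9 (_metadata_fd); the ebuild side
             # writes one line per entry in portage.auxdbkeys, and
             # _set_returncode() pairs those lines with the keys and hands them
             # to metadata_callback.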
3024
3025         __slots__ = ("cpv", "ebuild_path", "fd_pipes", "metadata_callback",
3026                 "ebuild_mtime", "metadata", "portdb", "repo_path", "settings") + \
3027                 ("_raw_metadata",)
3028
3029         _file_names = ("ebuild",)
3030         _files_dict = slot_dict_class(_file_names, prefix="")
3031         _metadata_fd = 9
3032
3033         def _start(self):
3034                 settings = self.settings
3035                 settings.setcpv(self.cpv)
3036                 ebuild_path = self.ebuild_path
3037
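                     # Try to determine EAPI before sourcing the ebuild: first
                     # from the file name (FEATURES=parse-eapi-glep-55), then
                     # from the ebuild's head (FEATURES=parse-eapi-ebuild-head).
                     # Unsupported EAPIs are reported via metadata_callback
                     # without spawning the "depend" phase.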
3038                 eapi = None
3039                 if 'parse-eapi-glep-55' in settings.features:
3040                         pf, eapi = portage._split_ebuild_name_glep55(
3041                                 os.path.basename(ebuild_path))
3042                 if eapi is None and \
3043                         'parse-eapi-ebuild-head' in settings.features:
3044                         eapi = portage._parse_eapi_ebuild_head(codecs.open(ebuild_path,
3045                                 mode='r', encoding='utf_8', errors='replace'))
3046
3047                 if eapi is not None:
3048                         if not portage.eapi_is_supported(eapi):
3049                                 self.metadata_callback(self.cpv, self.ebuild_path,
3050                                         self.repo_path, {'EAPI' : eapi}, self.ebuild_mtime)
3051                                 self.returncode = os.EX_OK
3052                                 self.wait()
3053                                 return
3054
3055                         settings.configdict['pkg']['EAPI'] = eapi
3056
3057                 debug = settings.get("PORTAGE_DEBUG") == "1"
3058                 master_fd = None
3059                 slave_fd = None
3060                 fd_pipes = None
3061                 if self.fd_pipes is not None:
3062                         fd_pipes = self.fd_pipes.copy()
3063                 else:
3064                         fd_pipes = {}
3065
3066                 fd_pipes.setdefault(0, sys.stdin.fileno())
3067                 fd_pipes.setdefault(1, sys.stdout.fileno())
3068                 fd_pipes.setdefault(2, sys.stderr.fileno())
3069
3070                 # flush any pending output
3071                 for fd in fd_pipes.itervalues():
3072                         if fd == sys.stdout.fileno():
3073                                 sys.stdout.flush()
3074                         if fd == sys.stderr.fileno():
3075                                 sys.stderr.flush()
3076
3077                 fd_pipes_orig = fd_pipes.copy()
3078                 self._files = self._files_dict()
3079                 files = self._files
3080
3081                 master_fd, slave_fd = os.pipe()
3082                 fcntl.fcntl(master_fd, fcntl.F_SETFL,
3083                         fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
3084
3085                 fd_pipes[self._metadata_fd] = slave_fd
3086
3087                 self._raw_metadata = []
3088                 files.ebuild = os.fdopen(master_fd, 'r')
3089                 self._reg_id = self.scheduler.register(files.ebuild.fileno(),
3090                         self._registered_events, self._output_handler)
3091                 self._registered = True
3092
3093                 retval = portage.doebuild(ebuild_path, "depend",
3094                         settings["ROOT"], settings, debug,
3095                         mydbapi=self.portdb, tree="porttree",
3096                         fd_pipes=fd_pipes, returnpid=True)
3097
3098                 os.close(slave_fd)
3099
3100                 if isinstance(retval, int):
3101                         # doebuild failed before spawning
3102                         self._unregister()
3103                         self.returncode = retval
3104                         self.wait()
3105                         return
3106
3107                 self.pid = retval[0]
3108                 portage.process.spawned_pids.remove(self.pid)
3109
3110         def _output_handler(self, fd, event):
3111
3112                 if event & PollConstants.POLLIN:
3113                         self._raw_metadata.append(self._files.ebuild.read())
3114                         if not self._raw_metadata[-1]:
3115                                 self._unregister()
3116                                 self.wait()
3117
3118                 self._unregister_if_appropriate(event)
3119                 return self._registered
3120
3121         def _set_returncode(self, wait_retval):
3122                 SubProcess._set_returncode(self, wait_retval)
3123                 if self.returncode == os.EX_OK:
3124                         metadata_lines = "".join(self._raw_metadata).splitlines()
3125                         if len(portage.auxdbkeys) != len(metadata_lines):
3126                                 # Don't trust bash's returncode if the
3127                                 # number of lines is incorrect.
3128                                 self.returncode = 1
3129                         else:
3130                                 metadata = izip(portage.auxdbkeys, metadata_lines)
3131                                 self.metadata = self.metadata_callback(self.cpv,
3132                                         self.ebuild_path, self.repo_path, metadata,
3133                                         self.ebuild_mtime)
3134
3135 class EbuildProcess(SpawnProcess):
3136
3137         __slots__ = ("phase", "pkg", "settings", "tree")
3138
3139         def _start(self):
3140                 # Don't open the log file during the clean phase since the
3141                 # open file can result in an nfs lock on $T/build.log which
3142                 # prevents the clean phase from removing $T.
3143                 if self.phase not in ("clean", "cleanrm"):
3144                         self.logfile = self.settings.get("PORTAGE_LOG_FILE")
3145                 SpawnProcess._start(self)
3146
3147         def _pipe(self, fd_pipes):
3148                 stdout_pipe = fd_pipes.get(1)
3149                 got_pty, master_fd, slave_fd = \
3150                         portage._create_pty_or_pipe(copy_term_size=stdout_pipe)
3151                 return (master_fd, slave_fd)
3152
3153         def _spawn(self, args, **kwargs):
3154
3155                 root_config = self.pkg.root_config
3156                 tree = self.tree
3157                 mydbapi = root_config.trees[tree].dbapi
3158                 settings = self.settings
3159                 ebuild_path = settings["EBUILD"]
3160                 debug = settings.get("PORTAGE_DEBUG") == "1"
3161
3162                 rval = portage.doebuild(ebuild_path, self.phase,
3163                         root_config.root, settings, debug,
3164                         mydbapi=mydbapi, tree=tree, **kwargs)
3165
3166                 return rval
3167
3168         def _set_returncode(self, wait_retval):
3169                 SpawnProcess._set_returncode(self, wait_retval)
3170
3171                 if self.phase not in ("clean", "cleanrm"):
3172                         self.returncode = portage._doebuild_exit_status_check_and_log(
3173                                 self.settings, self.phase, self.returncode)
3174
3175                 if self.phase == "test" and self.returncode != os.EX_OK and \
3176                         "test-fail-continue" in self.settings.features:
3177                         self.returncode = os.EX_OK
3178
3179                 portage._post_phase_userpriv_perms(self.settings)
3180
3181 class EbuildPhase(CompositeTask):
3182
3183         __slots__ = ("background", "pkg", "phase",
3184                 "scheduler", "settings", "tree")
3185
3186         _post_phase_cmds = portage._post_phase_cmds
3187
3188         def _start(self):
3189
3190                 ebuild_process = EbuildProcess(background=self.background,
3191                         pkg=self.pkg, phase=self.phase, scheduler=self.scheduler,
3192                         settings=self.settings, tree=self.tree)
3193
3194                 self._start_task(ebuild_process, self._ebuild_exit)
3195
3196         def _ebuild_exit(self, ebuild_process):
3197
3198                 if self.phase == "install":
3199                         out = None
3200                         log_path = self.settings.get("PORTAGE_LOG_FILE")
3201                         log_file = None
3202                         if self.background and log_path is not None:
3203                                 log_file = open(log_path, 'a')
3204                                 out = log_file
3205                         try:
3206                                 portage._check_build_log(self.settings, out=out)
3207                         finally:
3208                                 if log_file is not None:
3209                                         log_file.close()
3210
3211                 if self._default_exit(ebuild_process) != os.EX_OK:
3212                         self.wait()
3213                         return
3214
3215                 settings = self.settings
3216
3217                 if self.phase == "install":
3218                         portage._post_src_install_chost_fix(settings)
3219                         portage._post_src_install_uid_fix(settings)
3220
3221                 post_phase_cmds = self._post_phase_cmds.get(self.phase)
3222                 if post_phase_cmds is not None:
3223                         post_phase = MiscFunctionsProcess(background=self.background,
3224                                 commands=post_phase_cmds, phase=self.phase, pkg=self.pkg,
3225                                 scheduler=self.scheduler, settings=settings)
3226                         self._start_task(post_phase, self._post_phase_exit)
3227                         return
3228
3229                 self.returncode = ebuild_process.returncode
3230                 self._current_task = None
3231                 self.wait()
3232
3233         def _post_phase_exit(self, post_phase):
3234                 if self._final_exit(post_phase) != os.EX_OK:
3235                         writemsg("!!! post %s failed; exiting.\n" % self.phase,
3236                                 noiselevel=-1)
3237                 self._current_task = None
3238                 self.wait()
3239                 return
3240
3241 class EbuildBinpkg(EbuildProcess):
3242         """
3243         This assumes that src_install() has successfully completed.
3244         """
3245         __slots__ = ("_binpkg_tmpfile",)
3246
3247         def _start(self):
3248                 self.phase = "package"
3249                 self.tree = "porttree"
3250                 pkg = self.pkg
3251                 root_config = pkg.root_config
3252                 portdb = root_config.trees["porttree"].dbapi
3253                 bintree = root_config.trees["bintree"]
3254                 ebuild_path = portdb.findname(self.pkg.cpv)
3255                 settings = self.settings
3256                 debug = settings.get("PORTAGE_DEBUG") == "1"
3257
3258                 bintree.prevent_collision(pkg.cpv)
3259                 binpkg_tmpfile = os.path.join(bintree.pkgdir,
3260                         pkg.cpv + ".tbz2." + str(os.getpid()))
3261                 self._binpkg_tmpfile = binpkg_tmpfile
3262                 settings["PORTAGE_BINPKG_TMPFILE"] = binpkg_tmpfile
3263                 settings.backup_changes("PORTAGE_BINPKG_TMPFILE")
3264
3265                 try:
3266                         EbuildProcess._start(self)
3267                 finally:
3268                         settings.pop("PORTAGE_BINPKG_TMPFILE", None)
3269
3270         def _set_returncode(self, wait_retval):
3271                 EbuildProcess._set_returncode(self, wait_retval)
3272
3273                 pkg = self.pkg
3274                 bintree = pkg.root_config.trees["bintree"]
3275                 binpkg_tmpfile = self._binpkg_tmpfile
3276                 if self.returncode == os.EX_OK:
3277                         bintree.inject(pkg.cpv, filename=binpkg_tmpfile)
3278
3279 class EbuildMerge(SlotObject):
3280
3281         __slots__ = ("find_blockers", "logger", "ldpath_mtimes",
3282                 "pkg", "pkg_count", "pkg_path", "pretend",
3283                 "scheduler", "settings", "tree", "world_atom")
3284
3285         def execute(self):
3286                 root_config = self.pkg.root_config
3287                 settings = self.settings
3288                 retval = portage.merge(settings["CATEGORY"],
3289                         settings["PF"], settings["D"],
3290                         os.path.join(settings["PORTAGE_BUILDDIR"],
3291                         "build-info"), root_config.root, settings,
3292                         myebuild=settings["EBUILD"],
3293                         mytree=self.tree, mydbapi=root_config.trees[self.tree].dbapi,
3294                         vartree=root_config.trees["vartree"],
3295                         prev_mtimes=self.ldpath_mtimes,
3296                         scheduler=self.scheduler,
3297                         blockers=self.find_blockers)
3298
3299                 if retval == os.EX_OK:
3300                         self.world_atom(self.pkg)
3301                         self._log_success()
3302
3303                 return retval
3304
3305         def _log_success(self):
3306                 pkg = self.pkg
3307                 pkg_count = self.pkg_count
3308                 pkg_path = self.pkg_path
3309                 logger = self.logger
3310                 if "noclean" not in self.settings.features:
3311                         short_msg = "emerge: (%s of %s) %s Clean Post" % \
3312                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3313                         logger.log((" === (%s of %s) " + \
3314                                 "Post-Build Cleaning (%s::%s)") % \
3315                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path),
3316                                 short_msg=short_msg)
3317                 logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
3318                         (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
3319
3320 class PackageUninstall(AsynchronousTask):
3321
3322         __slots__ = ("ldpath_mtimes", "opts", "pkg", "scheduler", "settings")
3323
3324         def _start(self):
3325                 try:
3326                         unmerge(self.pkg.root_config, self.opts, "unmerge",
3327                                 [self.pkg.cpv], self.ldpath_mtimes, clean_world=0,
3328                                 clean_delay=0, raise_on_error=1, scheduler=self.scheduler,
3329                                 writemsg_level=self._writemsg_level)
3330                 except UninstallFailure, e:
3331                         self.returncode = e.status
3332                 else:
3333                         self.returncode = os.EX_OK
3334                 self.wait()
3335
3336         def _writemsg_level(self, msg, level=0, noiselevel=0):
3337
3338                 log_path = self.settings.get("PORTAGE_LOG_FILE")
3339                 background = self.background
3340
3341                 if log_path is None:
3342                         if not (background and level < logging.WARNING):
3343                                 portage.util.writemsg_level(msg,
3344                                         level=level, noiselevel=noiselevel)
3345                 else:
3346                         if not background:
3347                                 portage.util.writemsg_level(msg,
3348                                         level=level, noiselevel=noiselevel)
3349
3350                         f = open(log_path, 'a')
3351                         try:
3352                                 f.write(msg)
3353                         finally:
3354                                 f.close()
3355
3356 class Binpkg(CompositeTask):
3357
3358         __slots__ = ("find_blockers",
3359                 "ldpath_mtimes", "logger", "opts",
3360                 "pkg", "pkg_count", "prefetcher", "settings", "world_atom") + \
3361                 ("_bintree", "_build_dir", "_ebuild_path", "_fetched_pkg",
3362                 "_image_dir", "_infloc", "_pkg_path", "_tree", "_verify")
3363
3364         def _writemsg_level(self, msg, level=0, noiselevel=0):
3365
3366                 if not self.background:
3367                         portage.util.writemsg_level(msg,
3368                                 level=level, noiselevel=noiselevel)
3369
3370                 log_path = self.settings.get("PORTAGE_LOG_FILE")
3371                 if log_path is not None:
3372                         f = open(log_path, 'a')
3373                         try:
3374                                 f.write(msg)
3375                         finally:
3376                                 f.close()
3377
3378         def _start(self):
3379
3380                 pkg = self.pkg
3381                 settings = self.settings
3382                 settings.setcpv(pkg)
3383                 self._tree = "bintree"
3384                 self._bintree = self.pkg.root_config.trees[self._tree]
3385                 self._verify = not self.opts.pretend
3386
3387                 dir_path = os.path.join(settings["PORTAGE_TMPDIR"],
3388                         "portage", pkg.category, pkg.pf)
3389                 self._build_dir = EbuildBuildDir(dir_path=dir_path,
3390                         pkg=pkg, settings=settings)
3391                 self._image_dir = os.path.join(dir_path, "image")
3392                 self._infloc = os.path.join(dir_path, "build-info")
3393                 self._ebuild_path = os.path.join(self._infloc, pkg.pf + ".ebuild")
3394                 settings["EBUILD"] = self._ebuild_path
3395                 debug = settings.get("PORTAGE_DEBUG") == "1"
3396                 portage.doebuild_environment(self._ebuild_path, "setup",
3397                         settings["ROOT"], settings, debug, 1, self._bintree.dbapi)
3398                 settings.configdict["pkg"]["EMERGE_FROM"] = pkg.type_name
3399
3400                 # The prefetcher has already completed or it
3401                 # could still be running. If so, we must
3402                 # wait for it to complete since it holds
3403                 # a lock on the file being fetched. The
3404                 # portage.locks functions are only designed
3405                 # to work between separate processes. Since
3406                 # the lock is held by the current process,
3407                 # use the scheduler and fetcher methods to
3408                 # synchronize with the fetcher.
3409                 prefetcher = self.prefetcher
3410                 if prefetcher is None:
3411                         pass
3412                 elif not prefetcher.isAlive():
3413                         prefetcher.cancel()
3414                 elif prefetcher.poll() is None:
3415
3416                         waiting_msg = ("Fetching '%s' " + \
3417                                 "in the background. " + \
3418                                 "To view fetch progress, run `tail -f " + \
3419                                 "/var/log/emerge-fetch.log` in another " + \
3420                                 "terminal.") % prefetcher.pkg_path
3421                         msg_prefix = colorize("GOOD", " * ")
3422                         from textwrap import wrap
3423                         waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
3424                                 for line in wrap(waiting_msg, 65))
3425                         if not self.background:
3426                                 writemsg(waiting_msg, noiselevel=-1)
3427
3428                         self._current_task = prefetcher
3429                         prefetcher.addExitListener(self._prefetch_exit)
3430                         return
3431
3432                 self._prefetch_exit(prefetcher)
3433
3434         def _prefetch_exit(self, prefetcher):
3435
3436                 pkg = self.pkg
3437                 pkg_count = self.pkg_count
3438                 if not (self.opts.pretend or self.opts.fetchonly):
3439                         self._build_dir.lock()
3440                         # If necessary, discard old log so that we don't
3441                         # append to it.
3442                         self._build_dir.clean_log()
3443                         # Initialize PORTAGE_LOG_FILE.
3444                         portage.prepare_build_dirs(self.settings["ROOT"], self.settings, 1)
3445                 fetcher = BinpkgFetcher(background=self.background,
3446                         logfile=self.settings.get("PORTAGE_LOG_FILE"), pkg=self.pkg,
3447                         pretend=self.opts.pretend, scheduler=self.scheduler)
3448                 pkg_path = fetcher.pkg_path
3449                 self._pkg_path = pkg_path
3450
3451                 if self.opts.getbinpkg and self._bintree.isremote(pkg.cpv):
3452
3453                         msg = " --- (%s of %s) Fetching Binary (%s::%s)" %\
3454                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
3455                         short_msg = "emerge: (%s of %s) %s Fetch" % \
3456                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3457                         self.logger.log(msg, short_msg=short_msg)
3458                         self._start_task(fetcher, self._fetcher_exit)
3459                         return
3460
3461                 self._fetcher_exit(fetcher)
3462
3463         def _fetcher_exit(self, fetcher):
3464
3465                 # The fetcher only has a returncode when
3466                 # --getbinpkg is enabled.
3467                 if fetcher.returncode is not None:
3468                         self._fetched_pkg = True
3469                         if self._default_exit(fetcher) != os.EX_OK:
3470                                 self._unlock_builddir()
3471                                 self.wait()
3472                                 return
3473
3474                 if self.opts.pretend:
3475                         self._current_task = None
3476                         self.returncode = os.EX_OK
3477                         self.wait()
3478                         return
3479
3480                 verifier = None
3481                 if self._verify:
3482                         logfile = None
3483                         if self.background:
3484                                 logfile = self.settings.get("PORTAGE_LOG_FILE")
3485                         verifier = BinpkgVerifier(background=self.background,
3486                                 logfile=logfile, pkg=self.pkg)
3487                         self._start_task(verifier, self._verifier_exit)
3488                         return
3489
3490                 self._verifier_exit(verifier)
3491
3492         def _verifier_exit(self, verifier):
3493                 if verifier is not None and \
3494                         self._default_exit(verifier) != os.EX_OK:
3495                         self._unlock_builddir()
3496                         self.wait()
3497                         return
3498
3499                 logger = self.logger
3500                 pkg = self.pkg
3501                 pkg_count = self.pkg_count
3502                 pkg_path = self._pkg_path
3503
3504                 if self._fetched_pkg:
3505                         self._bintree.inject(pkg.cpv, filename=pkg_path)
3506
3507                 if self.opts.fetchonly:
3508                         self._current_task = None
3509                         self.returncode = os.EX_OK
3510                         self.wait()
3511                         return
3512
3513                 msg = " === (%s of %s) Merging Binary (%s::%s)" % \
3514                         (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
3515                 short_msg = "emerge: (%s of %s) %s Merge Binary" % \
3516                         (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3517                 logger.log(msg, short_msg=short_msg)
3518
3519                 phase = "clean"
3520                 settings = self.settings
3521                 ebuild_phase = EbuildPhase(background=self.background,
3522                         pkg=pkg, phase=phase, scheduler=self.scheduler,
3523                         settings=settings, tree=self._tree)
3524
3525                 self._start_task(ebuild_phase, self._clean_exit)
3526
3527         def _clean_exit(self, clean_phase):
3528                 if self._default_exit(clean_phase) != os.EX_OK:
3529                         self._unlock_builddir()
3530                         self.wait()
3531                         return
3532
3533                 dir_path = self._build_dir.dir_path
3534
3535                 infloc = self._infloc
3536                 pkg = self.pkg
3537                 pkg_path = self._pkg_path
3538
3539                 dir_mode = 0755
3540                 for mydir in (dir_path, self._image_dir, infloc):
3541                         portage.util.ensure_dirs(mydir, uid=portage.data.portage_uid,
3542                                 gid=portage.data.portage_gid, mode=dir_mode)
3543
3544                 # This initializes PORTAGE_LOG_FILE.
3545                 portage.prepare_build_dirs(self.settings["ROOT"], self.settings, 1)
3546                 self._writemsg_level(">>> Extracting info\n")
3547
3548                 pkg_xpak = portage.xpak.tbz2(self._pkg_path)
3549                 check_missing_metadata = ("CATEGORY", "PF")
3550                 missing_metadata = set()
3551                 for k in check_missing_metadata:
3552                         v = pkg_xpak.getfile(k)
3553                         if not v:
3554                                 missing_metadata.add(k)
3555
3556                 pkg_xpak.unpackinfo(infloc)
3557                 for k in missing_metadata:
3558                         if k == "CATEGORY":
3559                                 v = pkg.category
3560                         elif k == "PF":
3561                                 v = pkg.pf
3562                         else:
3563                                 continue
3564
3565                         f = open(os.path.join(infloc, k), 'wb')
3566                         try:
3567                                 f.write(v + "\n")
3568                         finally:
3569                                 f.close()
3570
3571                 # Store the md5sum in the vdb.
3572                 f = open(os.path.join(infloc, "BINPKGMD5"), "w")
3573                 try:
3574                         f.write(str(portage.checksum.perform_md5(pkg_path)) + "\n")
3575                 finally:
3576                         f.close()
3577
3578                 # This gives bashrc users an opportunity to do various things
3579                 # such as remove binary packages after they're installed.
3580                 settings = self.settings
3581                 settings.setcpv(self.pkg)
3582                 settings["PORTAGE_BINPKG_FILE"] = pkg_path
3583                 settings.backup_changes("PORTAGE_BINPKG_FILE")
3584
3585                 phase = "setup"
3586                 setup_phase = EbuildPhase(background=self.background,
3587                         pkg=self.pkg, phase=phase, scheduler=self.scheduler,
3588                         settings=settings, tree=self._tree)
3589
3590                 setup_phase.addExitListener(self._setup_exit)
3591                 self._current_task = setup_phase
3592                 self.scheduler.scheduleSetup(setup_phase)
3593
3594         def _setup_exit(self, setup_phase):
3595                 if self._default_exit(setup_phase) != os.EX_OK:
3596                         self._unlock_builddir()
3597                         self.wait()
3598                         return
3599
3600                 extractor = BinpkgExtractorAsync(background=self.background,
3601                         image_dir=self._image_dir,
3602                         pkg=self.pkg, pkg_path=self._pkg_path, scheduler=self.scheduler)
3603                 self._writemsg_level(">>> Extracting %s\n" % self.pkg.cpv)
3604                 self._start_task(extractor, self._extractor_exit)
3605
3606         def _extractor_exit(self, extractor):
3607                 if self._final_exit(extractor) != os.EX_OK:
3608                         self._unlock_builddir()
3609                         writemsg("!!! Error Extracting '%s'\n" % self._pkg_path,
3610                                 noiselevel=-1)
3611                 self.wait()
3612
3613         def _unlock_builddir(self):
3614                 if self.opts.pretend or self.opts.fetchonly:
3615                         return
3616                 portage.elog.elog_process(self.pkg.cpv, self.settings)
3617                 self._build_dir.unlock()
3618
3619         def install(self):
3620
3621                 # This gives bashrc users an opportunity to do various things
3622                 # such as remove binary packages after they're installed.
3623                 settings = self.settings
3624                 settings["PORTAGE_BINPKG_FILE"] = self._pkg_path
3625                 settings.backup_changes("PORTAGE_BINPKG_FILE")
3626
3627                 merge = EbuildMerge(find_blockers=self.find_blockers,
3628                         ldpath_mtimes=self.ldpath_mtimes, logger=self.logger,
3629                         pkg=self.pkg, pkg_count=self.pkg_count,
3630                         pkg_path=self._pkg_path, scheduler=self.scheduler,
3631                         settings=settings, tree=self._tree, world_atom=self.world_atom)
3632
3633                 try:
3634                         retval = merge.execute()
3635                 finally:
3636                         settings.pop("PORTAGE_BINPKG_FILE", None)
3637                         self._unlock_builddir()
3638                 return retval
3639
3640 class BinpkgFetcher(SpawnProcess):
3641
3642         __slots__ = ("pkg", "pretend",
3643                 "locked", "pkg_path", "_lock_obj")
3644
3645         def __init__(self, **kwargs):
3646                 SpawnProcess.__init__(self, **kwargs)
3647                 pkg = self.pkg
3648                 self.pkg_path = pkg.root_config.trees["bintree"].getname(pkg.cpv)
3649
3650         def _start(self):
3651
3652                 if self.cancelled:
3653                         return
3654
3655                 pkg = self.pkg
3656                 pretend = self.pretend
3657                 bintree = pkg.root_config.trees["bintree"]
3658                 settings = bintree.settings
3659                 use_locks = "distlocks" in settings.features
3660                 pkg_path = self.pkg_path
3661
3662                 if not pretend:
3663                         portage.util.ensure_dirs(os.path.dirname(pkg_path))
3664                         if use_locks:
3665                                 self.lock()
3666                 exists = os.path.exists(pkg_path)
3667                 resume = exists and os.path.basename(pkg_path) in bintree.invalids
3668                 if not (pretend or resume):
3669                         # Remove existing file or broken symlink.
3670                         try:
3671                                 os.unlink(pkg_path)
3672                         except OSError:
3673                                 pass
3674
3675                 # urljoin doesn't work correctly with
3676                 # unrecognized protocols like sftp
3677                 if bintree._remote_has_index:
3678                         rel_uri = bintree._remotepkgs[pkg.cpv].get("PATH")
3679                         if not rel_uri:
3680                                 rel_uri = pkg.cpv + ".tbz2"
3681                         uri = bintree._remote_base_uri.rstrip("/") + \
3682                                 "/" + rel_uri.lstrip("/")
3683                 else:
3684                         uri = settings["PORTAGE_BINHOST"].rstrip("/") + \
3685                                 "/" + pkg.pf + ".tbz2"
3686
3687                 if pretend:
3688                         portage.writemsg_stdout("\n%s\n" % uri, noiselevel=-1)
3689                         self.returncode = os.EX_OK
3690                         self.wait()
3691                         return
3692
3693                 protocol = urlparse.urlparse(uri)[0]
3694                 fcmd_prefix = "FETCHCOMMAND"
3695                 if resume:
3696                         fcmd_prefix = "RESUMECOMMAND"
3697                 fcmd = settings.get(fcmd_prefix + "_" + protocol.upper())
3698                 if not fcmd:
3699                         fcmd = settings.get(fcmd_prefix)
3700
3701                 fcmd_vars = {
3702                         "DISTDIR" : os.path.dirname(pkg_path),
3703                         "URI"     : uri,
3704                         "FILE"    : os.path.basename(pkg_path)
3705                 }
3706
3707                 fetch_env = dict(settings.iteritems())
3708                 fetch_args = [portage.util.varexpand(x, mydict=fcmd_vars) \
3709                         for x in shlex.split(fcmd)]
3710
3711                 if self.fd_pipes is None:
3712                         self.fd_pipes = {}
3713                 fd_pipes = self.fd_pipes
3714
3715                 # Redirect all output to stdout since some fetchers like
3716                 # wget pollute stderr (if portage detects a problem then it
3717                 # can send its own message to stderr).
3718                 fd_pipes.setdefault(0, sys.stdin.fileno())
3719                 fd_pipes.setdefault(1, sys.stdout.fileno())
3720                 fd_pipes.setdefault(2, sys.stdout.fileno())
3721
3722                 self.args = fetch_args
3723                 self.env = fetch_env
3724                 SpawnProcess._start(self)
3725
3726         def _set_returncode(self, wait_retval):
3727                 SpawnProcess._set_returncode(self, wait_retval)
3728                 if self.returncode == os.EX_OK:
3729                         # If possible, update the mtime to match the remote package if
3730                         # the fetcher didn't already do it automatically.
3731                         bintree = self.pkg.root_config.trees["bintree"]
3732                         if bintree._remote_has_index:
3733                                 remote_mtime = bintree._remotepkgs[self.pkg.cpv].get("MTIME")
3734                                 if remote_mtime is not None:
3735                                         try:
3736                                                 remote_mtime = long(remote_mtime)
3737                                         except ValueError:
3738                                                 pass
3739                                         else:
3740                                                 try:
3741                                                         local_mtime = long(os.stat(self.pkg_path).st_mtime)
3742                                                 except OSError:
3743                                                         pass
3744                                                 else:
3745                                                         if remote_mtime != local_mtime:
3746                                                                 try:
3747                                                                         os.utime(self.pkg_path,
3748                                                                                 (remote_mtime, remote_mtime))
3749                                                                 except OSError:
3750                                                                         pass
3751
3752                 if self.locked:
3753                         self.unlock()
3754
3755         def lock(self):
3756                 """
3757                 This raises an AlreadyLocked exception if lock() is called
3758                 while a lock is already held. In order to avoid this, call
3759                 unlock() or check the "locked" attribute before calling
3760                 lock().
3761                 """
3762                 if self._lock_obj is not None:
3763                         raise self.AlreadyLocked((self._lock_obj,))
3764
3765                 self._lock_obj = portage.locks.lockfile(
3766                         self.pkg_path, wantnewlockfile=1)
3767                 self.locked = True
3768
3769         class AlreadyLocked(portage.exception.PortageException):
3770                 pass
3771
3772         def unlock(self):
3773                 if self._lock_obj is None:
3774                         return
3775                 portage.locks.unlockfile(self._lock_obj)
3776                 self._lock_obj = None
3777                 self.locked = False
3778
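# The fetch-command handling in BinpkgFetcher._start() above is fairly dense:
# the URI's protocol selects a protocol-specific variable such as
# FETCHCOMMAND_HTTP (or RESUMECOMMAND_HTTP when resuming a partial download),
# falling back to plain FETCHCOMMAND/RESUMECOMMAND, and then ${DISTDIR},
# ${URI} and ${FILE} are substituted into the split command. The helper below
# is only an illustrative sketch of that logic and is not called anywhere;
# "example_settings" is a plain dict standing in for a real portage config
# object.
def _example_binpkg_fetch_args(example_settings, uri, pkg_path, resume=False):
        """Return roughly the argv that BinpkgFetcher would spawn for uri."""
        protocol = urlparse.urlparse(uri)[0]
        fcmd_prefix = "FETCHCOMMAND"
        if resume:
                fcmd_prefix = "RESUMECOMMAND"
        # Prefer e.g. FETCHCOMMAND_HTTP and fall back to plain FETCHCOMMAND.
        fcmd = example_settings.get(fcmd_prefix + "_" + protocol.upper())
        if not fcmd:
                fcmd = example_settings.get(fcmd_prefix)
        if not fcmd:
                return []
        fcmd_vars = {
                "DISTDIR" : os.path.dirname(pkg_path),
                "URI"     : uri,
                "FILE"    : os.path.basename(pkg_path)
        }
        return [portage.util.varexpand(x, mydict=fcmd_vars) \
                for x in shlex.split(fcmd)]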
3779 class BinpkgVerifier(AsynchronousTask):
3780         __slots__ = ("logfile", "pkg",)
3781
3782         def _start(self):
3783                 """
3784                 Note: Unlike a normal AsynchronousTask.start() method,
3785                 this one does all of its work synchronously. The returncode
3786                 attribute will be set before it returns.
3787                 """
3788
3789                 pkg = self.pkg
3790                 root_config = pkg.root_config
3791                 bintree = root_config.trees["bintree"]
3792                 rval = os.EX_OK
3793                 stdout_orig = sys.stdout
3794                 stderr_orig = sys.stderr
3795                 log_file = None
3796                 if self.background and self.logfile is not None:
3797                         log_file = open(self.logfile, 'a')
3798                 try:
3799                         if log_file is not None:
3800                                 sys.stdout = log_file
3801                                 sys.stderr = log_file
3802                         try:
3803                                 bintree.digestCheck(pkg)
3804                         except portage.exception.FileNotFound:
3805                                 writemsg("!!! Fetching Binary failed " + \
3806                                         "for '%s'\n" % pkg.cpv, noiselevel=-1)
3807                                 rval = 1
3808                         except portage.exception.DigestException, e:
3809                                 writemsg("\n!!! Digest verification failed:\n",
3810                                         noiselevel=-1)
3811                                 writemsg("!!! %s\n" % e.value[0],
3812                                         noiselevel=-1)
3813                                 writemsg("!!! Reason: %s\n" % e.value[1],
3814                                         noiselevel=-1)
3815                                 writemsg("!!! Got: %s\n" % e.value[2],
3816                                         noiselevel=-1)
3817                                 writemsg("!!! Expected: %s\n" % e.value[3],
3818                                         noiselevel=-1)
3819                                 rval = 1
3820                         if rval != os.EX_OK:
3821                                 pkg_path = bintree.getname(pkg.cpv)
3822                                 head, tail = os.path.split(pkg_path)
3823                                 temp_filename = portage._checksum_failure_temp_file(head, tail)
3824                                 writemsg("File renamed to '%s'\n" % (temp_filename,),
3825                                         noiselevel=-1)
3826                 finally:
3827                         sys.stdout = stdout_orig
3828                         sys.stderr = stderr_orig
3829                         if log_file is not None:
3830                                 log_file.close()
3831
3832                 self.returncode = rval
3833                 self.wait()
3834
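# As the docstring above notes, BinpkgVerifier does all of its work
# synchronously inside start(), so its returncode can be inspected as soon as
# start() returns.  The function below is only an illustrative sketch of that
# calling convention (mirroring how BinpkgPrefetcher drives the verifier) and
# is not called anywhere in emerge.
def _example_verify_binpkg(pkg, logfile=None, background=False):
        """Return True if the fetched binary package for pkg verifies."""
        verifier = BinpkgVerifier(background=background,
                logfile=logfile, pkg=pkg)
        verifier.start()
        return verifier.returncode == os.EX_OK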
3835 class BinpkgPrefetcher(CompositeTask):
3836
3837         __slots__ = ("pkg",) + \
3838                 ("pkg_path", "_bintree",)
3839
3840         def _start(self):
3841                 self._bintree = self.pkg.root_config.trees["bintree"]
3842                 fetcher = BinpkgFetcher(background=self.background,
3843                         logfile=self.scheduler.fetch.log_file, pkg=self.pkg,
3844                         scheduler=self.scheduler)
3845                 self.pkg_path = fetcher.pkg_path
3846                 self._start_task(fetcher, self._fetcher_exit)
3847
3848         def _fetcher_exit(self, fetcher):
3849
3850                 if self._default_exit(fetcher) != os.EX_OK:
3851                         self.wait()
3852                         return
3853
3854                 verifier = BinpkgVerifier(background=self.background,
3855                         logfile=self.scheduler.fetch.log_file, pkg=self.pkg)
3856                 self._start_task(verifier, self._verifier_exit)
3857
3858         def _verifier_exit(self, verifier):
3859                 if self._default_exit(verifier) != os.EX_OK:
3860                         self.wait()
3861                         return
3862
3863                 self._bintree.inject(self.pkg.cpv, filename=self.pkg_path)
3864
3865                 self._current_task = None
3866                 self.returncode = os.EX_OK
3867                 self.wait()
3868
3869 class BinpkgExtractorAsync(SpawnProcess):
3870
3871         __slots__ = ("image_dir", "pkg", "pkg_path")
3872
3873         _shell_binary = portage.const.BASH_BINARY
3874
3875         def _start(self):
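                # Decompress the binary package with bzip2 and stream the
                # result into tar, which unpacks it into the image directory
                # while preserving file permissions (-p).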
3876                 self.args = [self._shell_binary, "-c",
3877                         "bzip2 -dqc -- %s | tar -xp -C %s -f -" % \
3878                         (portage._shell_quote(self.pkg_path),
3879                         portage._shell_quote(self.image_dir))]
3880
3881                 self.env = self.pkg.root_config.settings.environ()
3882                 SpawnProcess._start(self)
3883
3884 class MergeListItem(CompositeTask):
3885
3886         """
3887         TODO: For parallel scheduling, everything here needs asynchronous
3888         execution support (start, poll, and wait methods).
3889         """
3890
3891         __slots__ = ("args_set",
3892                 "binpkg_opts", "build_opts", "config_pool", "emerge_opts",
3893                 "find_blockers", "logger", "mtimedb", "pkg",
3894                 "pkg_count", "pkg_to_replace", "prefetcher",
3895                 "settings", "statusMessage", "world_atom") + \
3896                 ("_install_task",)
3897
3898         def _start(self):
3899
3900                 pkg = self.pkg
3901                 build_opts = self.build_opts
3902
3903                 if pkg.installed:
3904                         # uninstall, executed by self.merge()
3905                         self.returncode = os.EX_OK
3906                         self.wait()
3907                         return
3908
3909                 args_set = self.args_set
3910                 find_blockers = self.find_blockers
3911                 logger = self.logger
3912                 mtimedb = self.mtimedb
3913                 pkg_count = self.pkg_count
3914                 scheduler = self.scheduler
3915                 settings = self.settings
3916                 world_atom = self.world_atom
3917                 ldpath_mtimes = mtimedb["ldpath"]
3918
3919                 action_desc = "Emerging"
3920                 preposition = "for"
3921                 if pkg.type_name == "binary":
3922                         action_desc += " binary"
3923
3924                 if build_opts.fetchonly:
3925                         action_desc = "Fetching"
3926
3927                 msg = "%s (%s of %s) %s" % \
3928                         (action_desc,
3929                         colorize("MERGE_LIST_PROGRESS", str(pkg_count.curval)),
3930                         colorize("MERGE_LIST_PROGRESS", str(pkg_count.maxval)),
3931                         colorize("GOOD", pkg.cpv))
3932
3933                 portdb = pkg.root_config.trees["porttree"].dbapi
3934                 portdir_repo_name = portdb._repository_map.get(portdb.porttree_root)
3935                 if portdir_repo_name:
3936                         pkg_repo_name = pkg.metadata.get("repository")
3937                         if pkg_repo_name != portdir_repo_name:
3938                                 if not pkg_repo_name:
3939                                         pkg_repo_name = "unknown repo"
3940                                 msg += " from %s" % pkg_repo_name
3941
3942                 if pkg.root != "/":
3943                         msg += " %s %s" % (preposition, pkg.root)
3944
3945                 if not build_opts.pretend:
3946                         self.statusMessage(msg)
3947                         logger.log(" >>> emerge (%s of %s) %s to %s" % \
3948                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
3949
3950                 if pkg.type_name == "ebuild":
3951
3952                         build = EbuildBuild(args_set=args_set,
3953                                 background=self.background,
3954                                 config_pool=self.config_pool,
3955                                 find_blockers=find_blockers,
3956                                 ldpath_mtimes=ldpath_mtimes, logger=logger,
3957                                 opts=build_opts, pkg=pkg, pkg_count=pkg_count,
3958                                 prefetcher=self.prefetcher, scheduler=scheduler,
3959                                 settings=settings, world_atom=world_atom)
3960
3961                         self._install_task = build
3962                         self._start_task(build, self._default_final_exit)
3963                         return
3964
3965                 elif pkg.type_name == "binary":
3966
3967                         binpkg = Binpkg(background=self.background,
3968                                 find_blockers=find_blockers,
3969                                 ldpath_mtimes=ldpath_mtimes, logger=logger,
3970                                 opts=self.binpkg_opts, pkg=pkg, pkg_count=pkg_count,
3971                                 prefetcher=self.prefetcher, settings=settings,
3972                                 scheduler=scheduler, world_atom=world_atom)
3973
3974                         self._install_task = binpkg
3975                         self._start_task(binpkg, self._default_final_exit)
3976                         return
3977
3978         def _poll(self):
3979                 self._install_task.poll()
3980                 return self.returncode
3981
3982         def _wait(self):
3983                 self._install_task.wait()
3984                 return self.returncode
3985
3986         def merge(self):
3987
3988                 pkg = self.pkg
3989                 build_opts = self.build_opts
3990                 find_blockers = self.find_blockers
3991                 logger = self.logger
3992                 mtimedb = self.mtimedb
3993                 pkg_count = self.pkg_count
3994                 prefetcher = self.prefetcher
3995                 scheduler = self.scheduler
3996                 settings = self.settings
3997                 world_atom = self.world_atom
3998                 ldpath_mtimes = mtimedb["ldpath"]
3999
4000                 if pkg.installed:
4001                         if not (build_opts.buildpkgonly or \
4002                                 build_opts.fetchonly or build_opts.pretend):
4003
4004                                 uninstall = PackageUninstall(background=self.background,
4005                                         ldpath_mtimes=ldpath_mtimes, opts=self.emerge_opts,
4006                                         pkg=pkg, scheduler=scheduler, settings=settings)
4007
4008                                 uninstall.start()
4009                                 retval = uninstall.wait()
4010                                 if retval != os.EX_OK:
4011                                         return retval
4012                         return os.EX_OK
4013
4014                 if build_opts.fetchonly or \
4015                         build_opts.buildpkgonly:
4016                         return self.returncode
4017
4018                 retval = self._install_task.install()
4019                 return retval
4020
4021 class PackageMerge(AsynchronousTask):
4022         """
4023         TODO: Implement asynchronous merge so that the scheduler can
4024         run while a merge is executing.
4025         """
4026
4027         __slots__ = ("merge",)
4028
4029         def _start(self):
4030
4031                 pkg = self.merge.pkg
4032                 pkg_count = self.merge.pkg_count
4033
4034                 if pkg.installed:
4035                         action_desc = "Uninstalling"
4036                         preposition = "from"
4037                         counter_str = ""
4038                 else:
4039                         action_desc = "Installing"
4040                         preposition = "to"
4041                         counter_str = "(%s of %s) " % \
4042                                 (colorize("MERGE_LIST_PROGRESS", str(pkg_count.curval)),
4043                                 colorize("MERGE_LIST_PROGRESS", str(pkg_count.maxval)))
4044
4045                 msg = "%s %s%s" % \
4046                         (action_desc,
4047                         counter_str,
4048                         colorize("GOOD", pkg.cpv))
4049
4050                 if pkg.root != "/":
4051                         msg += " %s %s" % (preposition, pkg.root)
4052
4053                 if not self.merge.build_opts.fetchonly and \
4054                         not self.merge.build_opts.pretend and \
4055                         not self.merge.build_opts.buildpkgonly:
4056                         self.merge.statusMessage(msg)
4057
4058                 self.returncode = self.merge.merge()
4059                 self.wait()
4060
4061 class DependencyArg(object):
4062         def __init__(self, arg=None, root_config=None):
4063                 self.arg = arg
4064                 self.root_config = root_config
4065
4066         def __str__(self):
4067                 return str(self.arg)
4068
4069 class AtomArg(DependencyArg):
4070         def __init__(self, atom=None, **kwargs):
4071                 DependencyArg.__init__(self, **kwargs)
4072                 self.atom = atom
4073                 if not isinstance(self.atom, portage.dep.Atom):
4074                         self.atom = portage.dep.Atom(self.atom)
4075                 self.set = (self.atom, )
4076
4077 class PackageArg(DependencyArg):
4078         def __init__(self, package=None, **kwargs):
4079                 DependencyArg.__init__(self, **kwargs)
4080                 self.package = package
4081                 self.atom = portage.dep.Atom("=" + package.cpv)
4082                 self.set = (self.atom, )
4083
4084 class SetArg(DependencyArg):
4085         def __init__(self, set=None, **kwargs):
4086                 DependencyArg.__init__(self, **kwargs)
4087                 self.set = set
4088                 self.name = self.arg[len(SETPREFIX):]
4089
4090 class Dependency(SlotObject):
4091         __slots__ = ("atom", "blocker", "depth",
4092                 "parent", "onlydeps", "priority", "root")
4093         def __init__(self, **kwargs):
4094                 SlotObject.__init__(self, **kwargs)
4095                 if self.priority is None:
4096                         self.priority = DepPriority()
4097                 if self.depth is None:
4098                         self.depth = 0
4099
4100 class BlockerCache(portage.cache.mappings.MutableMapping):
4101         """This caches blockers of installed packages so that dep_check does not
4102         have to be done for every single installed package on every invocation of
4103         emerge.  The cache is invalidated whenever it is detected that something
4104         has changed that might alter the results of dep_check() calls:
4105                 1) the set of installed packages (including COUNTER) has changed
4106                 2) the old-style virtuals have changed
4107         """
4108
4109         # Number of uncached packages to trigger cache update, since
4110         # it's wasteful to update it for every vdb change.
4111         _cache_threshold = 5
4112
4113         class BlockerData(object):
4114
4115                 __slots__ = ("__weakref__", "atoms", "counter")
4116
4117                 def __init__(self, counter, atoms):
4118                         self.counter = counter
4119                         self.atoms = atoms
4120
4121         def __init__(self, myroot, vardb):
4122                 self._vardb = vardb
4123                 self._virtuals = vardb.settings.getvirtuals()
4124                 self._cache_filename = os.path.join(myroot,
4125                         portage.CACHE_PATH.lstrip(os.path.sep), "vdb_blockers.pickle")
4126                 self._cache_version = "1"
4127                 self._cache_data = None
4128                 self._modified = set()
4129                 self._load()
4130
4131         def _load(self):
4132                 try:
4133                         f = open(self._cache_filename, mode='rb')
4134                         mypickle = pickle.Unpickler(f)
4135                         try:
4136                                 mypickle.find_global = None
4137                         except AttributeError:
4138                                 # TODO: If py3k, override Unpickler.find_class().
4139                                 pass
4140                         self._cache_data = mypickle.load()
4141                         f.close()
4142                         del f
4143                 except (IOError, OSError, EOFError, ValueError, pickle.UnpicklingError), e:
4144                         if isinstance(e, pickle.UnpicklingError):
4145                                 writemsg("!!! Error loading '%s': %s\n" % \
4146                                         (self._cache_filename, str(e)), noiselevel=-1)
4147                         del e
4148
4149                 cache_valid = self._cache_data and \
4150                         isinstance(self._cache_data, dict) and \
4151                         self._cache_data.get("version") == self._cache_version and \
4152                         isinstance(self._cache_data.get("blockers"), dict)
4153                 if cache_valid:
4154                         # Validate all the atoms and counters so that
4155                         # corruption is detected as soon as possible.
4156                         invalid_items = set()
4157                         for k, v in self._cache_data["blockers"].iteritems():
4158                                 if not isinstance(k, basestring):
4159                                         invalid_items.add(k)
4160                                         continue
4161                                 try:
4162                                         if portage.catpkgsplit(k) is None:
4163                                                 invalid_items.add(k)
4164                                                 continue
4165                                 except portage.exception.InvalidData:
4166                                         invalid_items.add(k)
4167                                         continue
4168                                 if not isinstance(v, tuple) or \
4169                                         len(v) != 2:
4170                                         invalid_items.add(k)
4171                                         continue
4172                                 counter, atoms = v
4173                                 if not isinstance(counter, (int, long)):
4174                                         invalid_items.add(k)
4175                                         continue
4176                                 if not isinstance(atoms, (list, tuple)):
4177                                         invalid_items.add(k)
4178                                         continue
4179                                 invalid_atom = False
4180                                 for atom in atoms:
4181                                         if not isinstance(atom, basestring):
4182                                                 invalid_atom = True
4183                                                 break
4184                                         if atom[:1] != "!" or \
4185                                                 not portage.isvalidatom(
4186                                                 atom, allow_blockers=True):
4187                                                 invalid_atom = True
4188                                                 break
4189                                 if invalid_atom:
4190                                         invalid_items.add(k)
4191                                         continue
4192
4193                         for k in invalid_items:
4194                                 del self._cache_data["blockers"][k]
4195                         if not self._cache_data["blockers"]:
4196                                 cache_valid = False
4197
4198                 if not cache_valid:
4199                         self._cache_data = {"version":self._cache_version}
4200                         self._cache_data["blockers"] = {}
4201                         self._cache_data["virtuals"] = self._virtuals
4202                 self._modified.clear()
4203
4204         def flush(self):
4205                 """If the current user has permission and the internal blocker cache
4206                 has been updated, save it to disk and mark it unmodified.  This is called
4207                 by emerge after it has processed blockers for all installed packages.
4208                 Currently, the cache is only written if the user has superuser
4209                 privileges (since that's required to obtain a lock), but all users
4210                 have read access and benefit from faster blocker lookups (as long as
4211                 the entire cache is still valid).  The cache is stored as a pickled
4212                 dict object with the following format:
4213
4214                 {
4215                         "version" : "1",
4216                         "blockers" : {cpv1:(counter,(atom1, atom2...)), cpv2...},
4217                         "virtuals" : vardb.settings.getvirtuals()
4218                 }
4219                 """
4220                 if len(self._modified) >= self._cache_threshold and \
4221                         secpass >= 2:
4222                         try:
4223                                 f = portage.util.atomic_ofstream(self._cache_filename, mode='wb')
4224                                 pickle.dump(self._cache_data, f, protocol=2)
4225                                 f.close()
4226                                 portage.util.apply_secpass_permissions(
4227                                         self._cache_filename, gid=portage.portage_gid, mode=0644)
4228                         except (IOError, OSError), e:
4229                                 pass
4230                         self._modified.clear()
4231
4232         def __setitem__(self, cpv, blocker_data):
4233                 """
4234                 Update the cache and mark it as modified for a future call to
4235                 self.flush().
4236
4237                 @param cpv: Package for which to cache blockers.
4238                 @type cpv: String
4239                 @param blocker_data: An object with counter and atoms attributes.
4240                 @type blocker_data: BlockerData
4241                 """
4242                 self._cache_data["blockers"][cpv] = \
4243                         (blocker_data.counter, tuple(str(x) for x in blocker_data.atoms))
4244                 self._modified.add(cpv)
4245
4246         def __iter__(self):
4247                 if self._cache_data is None:
4248                         # triggered by python-trace
4249                         return iter([])
4250                 return iter(self._cache_data["blockers"])
4251
4252         def __delitem__(self, cpv):
4253                 del self._cache_data["blockers"][cpv]
4254
4255         def __getitem__(self, cpv):
4256                 """
4257                 @rtype: BlockerData
4258                 @returns: An object with counter and atoms attributes.
4259                 """
4260                 return self.BlockerData(*self._cache_data["blockers"][cpv])
4261
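# The flush() docstring above documents the on-disk layout of
# vdb_blockers.pickle.  The helper below is only an illustrative sketch of
# how that structure can be read back and sanity-checked; BlockerCache._load()
# already does this with much stricter per-entry validation, and this sketch
# is not called anywhere in emerge.
def _example_read_blocker_cache(cache_filename):
        """Return the {cpv : (counter, atoms)} mapping, or None if unusable."""
        try:
                f = open(cache_filename, mode='rb')
                try:
                        cache_data = pickle.load(f)
                finally:
                        f.close()
        except (IOError, OSError, EOFError, pickle.UnpicklingError):
                return None
        if not isinstance(cache_data, dict) or \
                cache_data.get("version") != "1" or \
                not isinstance(cache_data.get("blockers"), dict):
                return None
        return cache_data["blockers"]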
4262 class BlockerDB(object):
4263
4264         def __init__(self, root_config):
4265                 self._root_config = root_config
4266                 self._vartree = root_config.trees["vartree"]
4267                 self._portdb = root_config.trees["porttree"].dbapi
4268
4269                 self._dep_check_trees = None
4270                 self._fake_vartree = None
4271
4272         def _get_fake_vartree(self, acquire_lock=0):
4273                 fake_vartree = self._fake_vartree
4274                 if fake_vartree is None:
4275                         fake_vartree = FakeVartree(self._root_config,
4276                                 acquire_lock=acquire_lock)
4277                         self._fake_vartree = fake_vartree
4278                         self._dep_check_trees = { self._vartree.root : {
4279                                 "porttree"    :  fake_vartree,
4280                                 "vartree"     :  fake_vartree,
4281                         }}
4282                 else:
4283                         fake_vartree.sync(acquire_lock=acquire_lock)
4284                 return fake_vartree
4285
4286         def findInstalledBlockers(self, new_pkg, acquire_lock=0):
4287                 blocker_cache = BlockerCache(self._vartree.root, self._vartree.dbapi)
4288                 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
4289                 settings = self._vartree.settings
4290                 stale_cache = set(blocker_cache)
4291                 fake_vartree = self._get_fake_vartree(acquire_lock=acquire_lock)
4292                 dep_check_trees = self._dep_check_trees
4293                 vardb = fake_vartree.dbapi
4294                 installed_pkgs = list(vardb)
4295
4296                 for inst_pkg in installed_pkgs:
4297                         stale_cache.discard(inst_pkg.cpv)
4298                         cached_blockers = blocker_cache.get(inst_pkg.cpv)
4299                         if cached_blockers is not None and \
4300                                 cached_blockers.counter != long(inst_pkg.metadata["COUNTER"]):
4301                                 cached_blockers = None
4302                         if cached_blockers is not None:
4303                                 blocker_atoms = cached_blockers.atoms
4304                         else:
4305                                 # Use aux_get() to trigger FakeVartree global
4306                                 # updates on *DEPEND when appropriate.
4307                                 depstr = " ".join(vardb.aux_get(inst_pkg.cpv, dep_keys))
4308                                 try:
4309                                         portage.dep._dep_check_strict = False
4310                                         success, atoms = portage.dep_check(depstr,
4311                                                 vardb, settings, myuse=inst_pkg.use.enabled,
4312                                                 trees=dep_check_trees, myroot=inst_pkg.root)
4313                                 finally:
4314                                         portage.dep._dep_check_strict = True
4315                                 if not success:
4316                                         pkg_location = os.path.join(inst_pkg.root,
4317                                                 portage.VDB_PATH, inst_pkg.category, inst_pkg.pf)
4318                                         portage.writemsg("!!! %s/*DEPEND: %s\n" % \
4319                                                 (pkg_location, atoms), noiselevel=-1)
4320                                         continue
4321
4322                                 blocker_atoms = [atom for atom in atoms \
4323                                         if atom.startswith("!")]
4324                                 blocker_atoms.sort()
4325                                 counter = long(inst_pkg.metadata["COUNTER"])
4326                                 blocker_cache[inst_pkg.cpv] = \
4327                                         blocker_cache.BlockerData(counter, blocker_atoms)
4328                 for cpv in stale_cache:
4329                         del blocker_cache[cpv]
4330                 blocker_cache.flush()
4331
4332                 blocker_parents = digraph()
4333                 blocker_atoms = []
4334                 for pkg in installed_pkgs:
4335                         for blocker_atom in blocker_cache[pkg.cpv].atoms:
4336                                 blocker_atom = blocker_atom.lstrip("!")
4337                                 blocker_atoms.append(blocker_atom)
4338                                 blocker_parents.add(blocker_atom, pkg)
4339
4340                 blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
4341                 blocking_pkgs = set()
4342                 for atom in blocker_atoms.iterAtomsForPackage(new_pkg):
4343                         blocking_pkgs.update(blocker_parents.parent_nodes(atom))
4344
4345                 # Check for blockers in the other direction.
4346                 depstr = " ".join(new_pkg.metadata[k] for k in dep_keys)
4347                 try:
4348                         portage.dep._dep_check_strict = False
4349                         success, atoms = portage.dep_check(depstr,
4350                                 vardb, settings, myuse=new_pkg.use.enabled,
4351                                 trees=dep_check_trees, myroot=new_pkg.root)
4352                 finally:
4353                         portage.dep._dep_check_strict = True
4354                 if not success:
4355                         # We should never get this far with invalid deps.
4356                         show_invalid_depstring_notice(new_pkg, depstr, atoms)
4357                         assert False
4358
4359                 blocker_atoms = [atom.lstrip("!") for atom in atoms \
4360                         if atom[:1] == "!"]
4361                 if blocker_atoms:
4362                         blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
4363                         for inst_pkg in installed_pkgs:
4364                                 try:
4365                                         blocker_atoms.iterAtomsForPackage(inst_pkg).next()
4366                                 except (portage.exception.InvalidDependString, StopIteration):
4367                                         continue
4368                                 blocking_pkgs.add(inst_pkg)
4369
4370                 return blocking_pkgs
4371
4372 def show_invalid_depstring_notice(parent_node, depstring, error_msg):
4373
4374         msg1 = "\n\n!!! Invalid or corrupt dependency specification: " + \
4375                 "\n\n%s\n\n%s\n\n%s\n\n" % (error_msg, parent_node, depstring)
4376         p_type, p_root, p_key, p_status = parent_node
4377         msg = []
4378         if p_status == "nomerge":
4379                 category, pf = portage.catsplit(p_key)
4380                 pkg_location = os.path.join(p_root, portage.VDB_PATH, category, pf)
4381                 msg.append("Portage is unable to process the dependencies of the ")
4382                 msg.append("'%s' package. " % p_key)
4383                 msg.append("In order to correct this problem, the package ")
4384                 msg.append("should be uninstalled, reinstalled, or upgraded. ")
4385                 msg.append("As a temporary workaround, the --nodeps option can ")
4386                 msg.append("be used to ignore all dependencies.  For reference, ")
4387                 msg.append("the problematic dependencies can be found in the ")
4388                 msg.append("*DEPEND files located in '%s/'." % pkg_location)
4389         else:
4390                 msg.append("This package can not be installed. ")
4391                 msg.append("Please notify the '%s' package maintainer " % p_key)
4392                 msg.append("about this problem.")
4393
4394         msg2 = "".join("%s\n" % line for line in textwrap.wrap("".join(msg), 72))
4395         writemsg_level(msg1 + msg2, level=logging.ERROR, noiselevel=-1)
4396
4397 class PackageVirtualDbapi(portage.dbapi):
4398         """
4399         A dbapi-like interface class that represents the state of the installed
4400         package database as new packages are installed, replacing any packages
4401         that previously existed in the same slot. The main difference between
4402         this class and fakedbapi is that this one uses Package instances
4403         internally (passed in via cpv_inject() and cpv_remove() calls).
4404         """
4405         def __init__(self, settings):
4406                 portage.dbapi.__init__(self)
4407                 self.settings = settings
4408                 self._match_cache = {}
4409                 self._cp_map = {}
4410                 self._cpv_map = {}
4411
4412         def clear(self):
4413                 """
4414                 Remove all packages.
4415                 """
4416                 if self._cpv_map:
4417                         self._clear_cache()
4418                         self._cp_map.clear()
4419                         self._cpv_map.clear()
4420
4421         def copy(self):
4422                 obj = PackageVirtualDbapi(self.settings)
4423                 obj._match_cache = self._match_cache.copy()
4424                 obj._cp_map = self._cp_map.copy()
4425                 for k, v in obj._cp_map.iteritems():
4426                         obj._cp_map[k] = v[:]
4427                 obj._cpv_map = self._cpv_map.copy()
4428                 return obj
4429
4430         def __iter__(self):
4431                 return self._cpv_map.itervalues()
4432
4433         def __contains__(self, item):
4434                 existing = self._cpv_map.get(item.cpv)
4435                 if existing is not None and \
4436                         existing == item:
4437                         return True
4438                 return False
4439
4440         def get(self, item, default=None):
4441                 cpv = getattr(item, "cpv", None)
4442                 if cpv is None:
4443                         if len(item) != 4:
4444                                 return default
4445                         type_name, root, cpv, operation = item
4446
4447                 existing = self._cpv_map.get(cpv)
4448                 if existing is not None and \
4449                         existing == item:
4450                         return existing
4451                 return default
4452
4453         def match_pkgs(self, atom):
4454                 return [self._cpv_map[cpv] for cpv in self.match(atom)]
4455
4456         def _clear_cache(self):
4457                 if self._categories is not None:
4458                         self._categories = None
4459                 if self._match_cache:
4460                         self._match_cache = {}
4461
4462         def match(self, origdep, use_cache=1):
4463                 result = self._match_cache.get(origdep)
4464                 if result is not None:
4465                         return result[:]
4466                 result = portage.dbapi.match(self, origdep, use_cache=use_cache)
4467                 self._match_cache[origdep] = result
4468                 return result[:]
4469
4470         def cpv_exists(self, cpv):
4471                 return cpv in self._cpv_map
4472
4473         def cp_list(self, mycp, use_cache=1):
4474                 cachelist = self._match_cache.get(mycp)
4475                 # cp_list() doesn't expand old-style virtuals
4476                 if cachelist and cachelist[0].startswith(mycp):
4477                         return cachelist[:]
4478                 cpv_list = self._cp_map.get(mycp)
4479                 if cpv_list is None:
4480                         cpv_list = []
4481                 else:
4482                         cpv_list = [pkg.cpv for pkg in cpv_list]
4483                 self._cpv_sort_ascending(cpv_list)
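                # Cache the result, except when it is an empty list for a
                # virtual/* category.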
4484                 if not (not cpv_list and mycp.startswith("virtual/")):
4485                         self._match_cache[mycp] = cpv_list
4486                 return cpv_list[:]
4487
4488         def cp_all(self):
4489                 return list(self._cp_map)
4490
4491         def cpv_all(self):
4492                 return list(self._cpv_map)
4493
4494         def cpv_inject(self, pkg):
4495                 cp_list = self._cp_map.get(pkg.cp)
4496                 if cp_list is None:
4497                         cp_list = []
4498                         self._cp_map[pkg.cp] = cp_list
4499                 e_pkg = self._cpv_map.get(pkg.cpv)
4500                 if e_pkg is not None:
4501                         if e_pkg == pkg:
4502                                 return
4503                         self.cpv_remove(e_pkg)
4504                 for e_pkg in cp_list:
4505                         if e_pkg.slot_atom == pkg.slot_atom:
4506                                 if e_pkg == pkg:
4507                                         return
4508                                 self.cpv_remove(e_pkg)
4509                                 break
4510                 cp_list.append(pkg)
4511                 self._cpv_map[pkg.cpv] = pkg
4512                 self._clear_cache()
4513
4514         def cpv_remove(self, pkg):
4515                 old_pkg = self._cpv_map.get(pkg.cpv)
4516                 if old_pkg != pkg:
4517                         raise KeyError(pkg)
4518                 self._cp_map[pkg.cp].remove(pkg)
4519                 del self._cpv_map[pkg.cpv]
4520                 self._clear_cache()
4521
4522         def aux_get(self, cpv, wants):
4523                 metadata = self._cpv_map[cpv].metadata
4524                 return [metadata.get(x, "") for x in wants]
4525
4526         def aux_update(self, cpv, values):
4527                 self._cpv_map[cpv].metadata.update(values)
4528                 self._clear_cache()
4529
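# PackageVirtualDbapi replaces whatever package currently occupies a slot
# when a new package is injected (see cpv_inject() above).  The sketch below
# illustrates that behaviour with a minimal, hypothetical stand-in for the
# real Package class; the stand-in only provides the attributes that
# cpv_inject() and cpv_remove() touch (cp, cpv, slot_atom), and neither the
# stand-in nor the function is used anywhere in emerge.
class _ExampleSlotPkg(object):
        def __init__(self, cp, version, slot):
                self.cp = cp
                self.cpv = "%s-%s" % (cp, version)
                self.slot_atom = "%s:%s" % (cp, slot)

def _example_slot_replacement(settings):
        fakedb = PackageVirtualDbapi(settings)
        fakedb.cpv_inject(_ExampleSlotPkg("app-misc/foo", "1.0", "0"))
        # Injecting another version in the same slot displaces the first one.
        fakedb.cpv_inject(_ExampleSlotPkg("app-misc/foo", "1.1", "0"))
        return fakedb.cpv_all()  # ["app-misc/foo-1.1"]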
4530 class depgraph(object):
4531
4532         pkg_tree_map = RootConfig.pkg_tree_map
4533
4534         _dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
4535
4536         def __init__(self, settings, trees, myopts, myparams, spinner):
4537                 self.settings = settings
4538                 self.target_root = settings["ROOT"]
4539                 self.myopts = myopts
4540                 self.myparams = myparams
4541                 self.edebug = 0
4542                 if settings.get("PORTAGE_DEBUG", "") == "1":
4543                         self.edebug = 1
4544                 self.spinner = spinner
4545                 self._running_root = trees["/"]["root_config"]
4546                 self._opts_no_restart = Scheduler._opts_no_restart
4547                 self.pkgsettings = {}
4548                 # Maps slot atom to package for each Package added to the graph.
4549                 self._slot_pkg_map = {}
4550                 # Maps nodes to the reasons they were selected for reinstallation.
4551                 self._reinstall_nodes = {}
4552                 self.mydbapi = {}
4553                 self.trees = {}
4554                 self._trees_orig = trees
4555                 self.roots = {}
4556                 # Contains a filtered view of preferred packages that are selected
4557                 # from available repositories.
4558                 self._filtered_trees = {}
4559                 # Contains installed packages and new packages that have been added
4560                 # to the graph.
4561                 self._graph_trees = {}
4562                 # All Package instances
4563                 self._pkg_cache = {}
4564                 for myroot in trees:
4565                         self.trees[myroot] = {}
4566                         # Create a RootConfig instance that references
4567                         # the FakeVartree instead of the real one.
4568                         self.roots[myroot] = RootConfig(
4569                                 trees[myroot]["vartree"].settings,
4570                                 self.trees[myroot],
4571                                 trees[myroot]["root_config"].setconfig)
4572                         for tree in ("porttree", "bintree"):
4573                                 self.trees[myroot][tree] = trees[myroot][tree]
4574                         self.trees[myroot]["vartree"] = \
4575                                 FakeVartree(trees[myroot]["root_config"],
4576                                         pkg_cache=self._pkg_cache)
4577                         self.pkgsettings[myroot] = portage.config(
4578                                 clone=self.trees[myroot]["vartree"].settings)
4579                         self._slot_pkg_map[myroot] = {}
4580                         vardb = self.trees[myroot]["vartree"].dbapi
4581                         preload_installed_pkgs = "--nodeps" not in self.myopts and \
4582                                 "--buildpkgonly" not in self.myopts
4583                         # This fakedbapi instance will model the state that the vdb will
4584                         # have after new packages have been installed.
4585                         fakedb = PackageVirtualDbapi(vardb.settings)
4586                         if preload_installed_pkgs:
4587                                 for pkg in vardb:
4588                                         self.spinner.update()
4589                                         # This triggers metadata updates via FakeVartree.
4590                                         vardb.aux_get(pkg.cpv, [])
4591                                         fakedb.cpv_inject(pkg)
4592
4593                         # Now that the vardb state is cached in our FakeVartree,
4594                         # we won't be needing the real vartree cache for a while.
4595                         # To make some room on the heap, clear the vardbapi
4596                         # caches.
4597                         trees[myroot]["vartree"].dbapi._clear_cache()
4598                         gc.collect()
4599
4600                         self.mydbapi[myroot] = fakedb
4601                         def graph_tree():
4602                                 pass
4603                         graph_tree.dbapi = fakedb
4604                         self._graph_trees[myroot] = {}
4605                         self._filtered_trees[myroot] = {}
4606                         # Substitute the graph tree for the vartree in dep_check() since we
4607                         # want atom selections to be consistent with package selections
4608                         # that have already been made.
4609                         self._graph_trees[myroot]["porttree"]   = graph_tree
4610                         self._graph_trees[myroot]["vartree"]    = graph_tree
4611                         def filtered_tree():
4612                                 pass
4613                         filtered_tree.dbapi = self._dep_check_composite_db(self, myroot)
4614                         self._filtered_trees[myroot]["porttree"] = filtered_tree
4615
4616                         # Passing in graph_tree as the vartree here could lead to better
4617                         # atom selections in some cases by causing atoms for packages that
4618                         # have been added to the graph to be preferred over other choices.
4619                         # However, it can trigger atom selections that result in
4620                         # unresolvable direct circular dependencies. For example, this
4621                         # happens with gwydion-dylan which depends on either itself or
4622                         # gwydion-dylan-bin. In case gwydion-dylan is not yet installed,
4623                         # gwydion-dylan-bin needs to be selected in order to avoid
4624                         # an unresolvable direct circular dependency.
4625                         #
4626                         # To solve the problem described above, pass in "graph_db" so that
4627                         # packages that have been added to the graph are distinguishable
4628                         # from other available packages and installed packages. Also, pass
4629                         # the parent package into self._select_atoms() calls so that
4630                         # unresolvable direct circular dependencies can be detected and
4631                         # avoided when possible.
4632                         self._filtered_trees[myroot]["graph_db"] = graph_tree.dbapi
4633                         self._filtered_trees[myroot]["vartree"] = self.trees[myroot]["vartree"]
4634
4635                         dbs = []
4636                         portdb = self.trees[myroot]["porttree"].dbapi
4637                         bindb  = self.trees[myroot]["bintree"].dbapi
4638                         vardb  = self.trees[myroot]["vartree"].dbapi
4639                         #               (db, pkg_type, built, installed, db_keys)
4640                         if "--usepkgonly" not in self.myopts:
4641                                 db_keys = list(portdb._aux_cache_keys)
4642                                 dbs.append((portdb, "ebuild", False, False, db_keys))
4643                         if "--usepkg" in self.myopts:
4644                                 db_keys = list(bindb._aux_cache_keys)
4645                                 dbs.append((bindb,  "binary", True, False, db_keys))
4646                         db_keys = list(trees[myroot]["vartree"].dbapi._aux_cache_keys)
4647                         dbs.append((vardb, "installed", True, True, db_keys))
4648                         self._filtered_trees[myroot]["dbs"] = dbs
4649                         if "--usepkg" in self.myopts:
4650                                 self.trees[myroot]["bintree"].populate(
4651                                         "--getbinpkg" in self.myopts,
4652                                         "--getbinpkgonly" in self.myopts)
4653                 del trees
4654
4655                 self.digraph=portage.digraph()
4656                 # contains all sets added to the graph
4657                 self._sets = {}
4658                 # contains atoms given as arguments
4659                 self._sets["args"] = InternalPackageSet()
4660                 # contains all atoms from all sets added to the graph, including
4661                 # atoms given as arguments
4662                 self._set_atoms = InternalPackageSet()
4663                 self._atom_arg_map = {}
4664                 # contains all nodes pulled in by self._set_atoms
4665                 self._set_nodes = set()
4666                 # Contains only Blocker -> Uninstall edges
4667                 self._blocker_uninstalls = digraph()
4668                 # Contains only Package -> Blocker edges
4669                 self._blocker_parents = digraph()
4670                 # Contains only irrelevant Package -> Blocker edges
4671                 self._irrelevant_blockers = digraph()
4672                 # Contains only unsolvable Package -> Blocker edges
4673                 self._unsolvable_blockers = digraph()
4674                 # Contains all Blocker -> Blocked Package edges
4675                 self._blocked_pkgs = digraph()
4676                 # Contains world packages that have been protected from
4677                 # uninstallation but may not have been added to the graph
4678                 # if the graph is not complete yet.
4679                 self._blocked_world_pkgs = {}
4680                 self._slot_collision_info = {}
4681                 # Slot collision nodes are not allowed to block other packages since
4682                 # blocker validation is only able to account for one package per slot.
4683                 self._slot_collision_nodes = set()
4684                 self._parent_atoms = {}
4685                 self._slot_conflict_parent_atoms = set()
4686                 self._serialized_tasks_cache = None
4687                 self._scheduler_graph = None
4688                 self._displayed_list = None
4689                 self._pprovided_args = []
4690                 self._missing_args = []
4691                 self._masked_installed = set()
4692                 self._unsatisfied_deps_for_display = []
4693                 self._unsatisfied_blockers_for_display = None
4694                 self._circular_deps_for_display = None
4695                 self._dep_stack = []
4696                 self._unsatisfied_deps = []
4697                 self._initially_unsatisfied_deps = []
4698                 self._ignored_deps = []
4699                 self._required_set_names = set(["system", "world"])
4700                 self._select_atoms = self._select_atoms_highest_available
4701                 self._select_package = self._select_pkg_highest_available
4702                 self._highest_pkg_cache = {}
4703
4704         def _show_slot_collision_notice(self):
4705                 """Show an informational message advising the user to mask one of the
4706                 packages. In some cases it may be possible to resolve this
4707                 automatically, but support for backtracking (removal of nodes that have
4708                 already been selected) will be required in order to handle all possible
4709                 cases.
4710                 """
4711
4712                 if not self._slot_collision_info:
4713                         return
4714
4715                 self._show_merge_list()
4716
4717                 msg = []
4718                 msg.append("\n!!! Multiple package instances within a single " + \
4719                         "package slot have been pulled\n")
4720                 msg.append("!!! into the dependency graph, resulting" + \
4721                         " in a slot conflict:\n\n")
4722                 indent = "  "
4723                 # Max number of parents shown, to avoid flooding the display.
4724                 max_parents = 3
4725                 explanation_columns = 70
4726                 explanations = 0
4727                 for (slot_atom, root), slot_nodes \
4728                         in self._slot_collision_info.iteritems():
4729                         msg.append(str(slot_atom))
4730                         msg.append("\n\n")
4731
4732                         for node in slot_nodes:
4733                                 msg.append(indent)
4734                                 msg.append(str(node))
4735                                 parent_atoms = self._parent_atoms.get(node)
4736                                 if parent_atoms:
4737                                         pruned_list = set()
4738                                         # Prefer conflict atoms over others.
4739                                         for parent_atom in parent_atoms:
4740                                                 if len(pruned_list) >= max_parents:
4741                                                         break
4742                                                 if parent_atom in self._slot_conflict_parent_atoms:
4743                                                         pruned_list.add(parent_atom)
4744
4745                                         # If this package was pulled in by conflict atoms then
4746                                         # show those alone since those are the most interesting.
4747                                         if not pruned_list:
4748                                                 # When generating the pruned list, prefer instances
4749                                                 # of DependencyArg over instances of Package.
4750                                                 for parent_atom in parent_atoms:
4751                                                         if len(pruned_list) >= max_parents:
4752                                                                 break
4753                                                         parent, atom = parent_atom
4754                                                         if isinstance(parent, DependencyArg):
4755                                                                 pruned_list.add(parent_atom)
4756                                                 # Prefer Package instances that themselves have been
4757                                                 # pulled into collision slots.
4758                                                 for parent_atom in parent_atoms:
4759                                                         if len(pruned_list) >= max_parents:
4760                                                                 break
4761                                                         parent, atom = parent_atom
4762                                                         if isinstance(parent, Package) and \
4763                                                                 (parent.slot_atom, parent.root) \
4764                                                                 in self._slot_collision_info:
4765                                                                 pruned_list.add(parent_atom)
4766                                                 for parent_atom in parent_atoms:
4767                                                         if len(pruned_list) >= max_parents:
4768                                                                 break
4769                                                         pruned_list.add(parent_atom)
4770                                         omitted_parents = len(parent_atoms) - len(pruned_list)
4771                                         parent_atoms = pruned_list
4772                                         msg.append(" pulled in by\n")
4773                                         for parent_atom in parent_atoms:
4774                                                 parent, atom = parent_atom
4775                                                 msg.append(2*indent)
4776                                                 if isinstance(parent,
4777                                                         (PackageArg, AtomArg)):
4778                                                         # For PackageArg and AtomArg types, it's
4779                                                         # redundant to display the atom attribute.
4780                                                         msg.append(str(parent))
4781                                                 else:
4782                                                         # Display the specific atom from SetArg or
4783                                                         # Package types.
4784                                                         msg.append("%s required by %s" % (atom, parent))
4785                                                 msg.append("\n")
4786                                         if omitted_parents:
4787                                                 msg.append(2*indent)
4788                                                 msg.append("(and %d more)\n" % omitted_parents)
4789                                 else:
4790                                         msg.append(" (no parents)\n")
4791                                 msg.append("\n")
4792                         explanation = self._slot_conflict_explanation(slot_nodes)
4793                         if explanation:
4794                                 explanations += 1
4795                                 msg.append(indent + "Explanation:\n\n")
4796                                 for line in textwrap.wrap(explanation, explanation_columns):
4797                                         msg.append(2*indent + line + "\n")
4798                                 msg.append("\n")
4799                 msg.append("\n")
4800                 sys.stderr.write("".join(msg))
4801                 sys.stderr.flush()
4802
4803                 explanations_for_all = explanations == len(self._slot_collision_info)
4804
4805                 if explanations_for_all or "--quiet" in self.myopts:
4806                         return
4807
4808                 msg = []
4809                 msg.append("It may be possible to solve this problem ")
4810                 msg.append("by using package.mask to prevent one of ")
4811                 msg.append("those packages from being selected. ")
4812                 msg.append("However, it is also possible that conflicting ")
4813                 msg.append("dependencies exist such that they are impossible to ")
4814                 msg.append("satisfy simultaneously.  If such a conflict exists in ")
4815                 msg.append("the dependencies of two different packages, then those ")
4816                 msg.append("packages can not be installed simultaneously.")
4817
4818                 from formatter import AbstractFormatter, DumbWriter
4819                 f = AbstractFormatter(DumbWriter(sys.stderr, maxcol=72))
4820                 for x in msg:
4821                         f.add_flowing_data(x)
4822                 f.end_paragraph(1)
4823
4824                 msg = []
4825                 msg.append("For more information, see MASKED PACKAGES ")
4826                 msg.append("section in the emerge man page or refer ")
4827                 msg.append("to the Gentoo Handbook.")
4828                 for x in msg:
4829                         f.add_flowing_data(x)
4830                 f.end_paragraph(1)
4831                 f.writer.flush()
4832
4833         def _slot_conflict_explanation(self, slot_nodes):
4834                 """
4835                 When a slot conflict occurs due to USE deps, there are a few
4836                 different cases to consider:
4837
4838                 1) New USE are correctly set but --newuse wasn't requested so an
4839                    installed package with incorrect USE happened to get pulled
4840                    into the graph before the new one.
4841
4842                 2) New USE are incorrectly set but an installed package has correct
4843                    USE so it got pulled into the graph, and a new instance also got
4844                    pulled in due to --newuse or an upgrade.
4845
4846                 3) Multiple USE deps exist that can't be satisfied simultaneously,
4847                    and multiple package instances got pulled into the same slot to
4848                    satisfy the conflicting deps.
4849
4850                 Currently, explanations and suggested courses of action are generated
4851                 for cases 1 and 2. Case 3 is too complex to give a useful suggestion.
4852                 """
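                     # For example (hypothetical packages): if dev-libs/foo-1.0 is
                     # installed with USE="-ssl" and a parent now requires
                     # dev-libs/foo[ssl], the installed instance and a new instance
                     # can both land in slot dev-libs/foo:0; that is case 1 above,
                     # and --newuse (or an explicit reinstall) resolves it.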
4853
4854                 if len(slot_nodes) != 2:
4855                         # Suggestions are only implemented for
4856                         # conflicts between two packages.
4857                         return None
4858
4859                 all_conflict_atoms = self._slot_conflict_parent_atoms
4860                 matched_node = None
4861                 matched_atoms = None
4862                 unmatched_node = None
4863                 for node in slot_nodes:
4864                         parent_atoms = self._parent_atoms.get(node)
4865                         if not parent_atoms:
4866                                 # Normally, there are always parent atoms. If there are
4867                                 # none then something unexpected is happening and there's
4868                                 # currently no suggestion for this case.
4869                                 return None
4870                         conflict_atoms = all_conflict_atoms.intersection(parent_atoms)
4871                         for parent_atom in conflict_atoms:
4872                                 parent, atom = parent_atom
4873                                 if not atom.use:
4874                                         # Suggestions are currently only implemented for cases
4875                                         # in which all conflict atoms have USE deps.
4876                                         return None
4877                         if conflict_atoms:
4878                                 if matched_node is not None:
4879                                         # If conflict atoms match multiple nodes
4880                                         # then there's no suggestion.
4881                                         return None
4882                                 matched_node = node
4883                                 matched_atoms = conflict_atoms
4884                         else:
4885                                 if unmatched_node is not None:
4886                                         # Neither node is matched by conflict atoms, and
4887                                         # there is no suggestion for this case.
4888                                         return None
4889                                 unmatched_node = node
4890
4891                 if matched_node is None or unmatched_node is None:
4892                         # This shouldn't happen.
4893                         return None
4894
4895                 if unmatched_node.installed and not matched_node.installed and \
4896                         unmatched_node.cpv == matched_node.cpv:
4897                         # If the conflicting packages are the same version then
4898                         # --newuse should be all that's needed. If they are different
4899                         # versions then there's some other problem.
4900                         return "New USE are correctly set, but --newuse wasn't" + \
4901                                 " requested, so an installed package with incorrect USE " + \
4902                                 "happened to get pulled into the dependency graph. " + \
4903                                 "In order to solve " + \
4904                                 "this, either specify the --newuse option or explicitly " + \
4905                                 "reinstall '%s'." % matched_node.slot_atom
4906
4907                 if matched_node.installed and not unmatched_node.installed:
4908                         atoms = sorted(set(atom for parent, atom in matched_atoms))
4909                         explanation = ("New USE for '%s' are incorrectly set. " + \
4910                                 "In order to solve this, adjust USE to satisfy '%s'") % \
4911                                 (matched_node.slot_atom, atoms[0])
4912                         if len(atoms) > 1:
4913                                 for atom in atoms[1:-1]:
4914                                         explanation += ", '%s'" % (atom,)
4915                                 if len(atoms) > 2:
4916                                         explanation += ","
4917                                 explanation += " and '%s'" % (atoms[-1],)
4918                         explanation += "."
4919                         return explanation
4920
4921                 return None
4922
4923         def _process_slot_conflicts(self):
4924                 """
4925                 Process slot conflict data to identify specific atoms which
4926                 lead to conflict. These atoms only match a subset of the
4927                 packages that have been pulled into a given slot.
4928                 """
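                     # Illustrative example (hypothetical): if slot dev-libs/foo:0
                     # holds both foo-1.0[-ssl] and foo-1.1[ssl], an atom such as
                     # dev-libs/foo[ssl] matches only one of them; atoms like that
                     # end up in self._slot_conflict_parent_atoms and drive the
                     # output of _show_slot_collision_notice().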
4929                 for (slot_atom, root), slot_nodes \
4930                         in self._slot_collision_info.iteritems():
4931
4932                         all_parent_atoms = set()
4933                         for pkg in slot_nodes:
4934                                 parent_atoms = self._parent_atoms.get(pkg)
4935                                 if not parent_atoms:
4936                                         continue
4937                                 all_parent_atoms.update(parent_atoms)
4938
4939                         for pkg in slot_nodes:
4940                                 parent_atoms = self._parent_atoms.get(pkg)
4941                                 if parent_atoms is None:
4942                                         parent_atoms = set()
4943                                         self._parent_atoms[pkg] = parent_atoms
4944                                 for parent_atom in all_parent_atoms:
4945                                         if parent_atom in parent_atoms:
4946                                                 continue
4947                                         # Use package set for matching since it will match via
4948                                         # PROVIDE when necessary, while match_from_list does not.
4949                                         parent, atom = parent_atom
4950                                         atom_set = InternalPackageSet(
4951                                                 initial_atoms=(atom,))
4952                                         if atom_set.findAtomForPackage(pkg):
4953                                                 parent_atoms.add(parent_atom)
4954                                         else:
4955                                                 self._slot_conflict_parent_atoms.add(parent_atom)
4956
4957         def _reinstall_for_flags(self, forced_flags,
4958                 orig_use, orig_iuse, cur_use, cur_iuse):
4959                 """Return a set of flags that trigger reinstallation, or None if there
4960                 are no such flags."""
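                     # A worked example of the --newuse branch (hypothetical flags):
                     #   orig_iuse = {'ssl', 'gtk'}   orig_use = {'ssl'}
                     #   cur_iuse  = {'ssl', 'qt4'}   cur_use  = {'ssl', 'qt4'}
                     #   forced_flags = set()
                     # IUSE changes give {'gtk', 'qt4'}; enabled-flag changes give
                     # {'qt4'}; the union {'gtk', 'qt4'} is returned, triggering a rebuild.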
4961                 if "--newuse" in self.myopts:
4962                         flags = set(orig_iuse.symmetric_difference(
4963                                 cur_iuse).difference(forced_flags))
4964                         flags.update(orig_iuse.intersection(orig_use).symmetric_difference(
4965                                 cur_iuse.intersection(cur_use)))
4966                         if flags:
4967                                 return flags
4968                 elif "changed-use" == self.myopts.get("--reinstall"):
4969                         flags = orig_iuse.intersection(orig_use).symmetric_difference(
4970                                 cur_iuse.intersection(cur_use))
4971                         if flags:
4972                                 return flags
4973                 return None
4974
4975         def _create_graph(self, allow_unsatisfied=False):
4976                 dep_stack = self._dep_stack
4977                 while dep_stack:
4978                         self.spinner.update()
4979                         dep = dep_stack.pop()
4980                         if isinstance(dep, Package):
4981                                 if not self._add_pkg_deps(dep,
4982                                         allow_unsatisfied=allow_unsatisfied):
4983                                         return 0
4984                                 continue
4985                         if not self._add_dep(dep, allow_unsatisfied=allow_unsatisfied):
4986                                 return 0
4987                 return 1
4988
4989         def _add_dep(self, dep, allow_unsatisfied=False):
4990                 debug = "--debug" in self.myopts
4991                 buildpkgonly = "--buildpkgonly" in self.myopts
4992                 nodeps = "--nodeps" in self.myopts
4993                 empty = "empty" in self.myparams
4994                 deep = "deep" in self.myparams
4995                 update = "--update" in self.myopts and dep.depth <= 1
4996                 if dep.blocker:
4997                         if not buildpkgonly and \
4998                                 not nodeps and \
4999                                 dep.parent not in self._slot_collision_nodes:
5000                                 if dep.parent.onlydeps:
5001                                         # It's safe to ignore blockers if the
5002                                         # parent is an --onlydeps node.
5003                                         return 1
5004                                 # The blocker applies to the root where
5005                                 # the parent is or will be installed.
5006                                 blocker = Blocker(atom=dep.atom,
5007                                         eapi=dep.parent.metadata["EAPI"],
5008                                         root=dep.parent.root)
5009                                 self._blocker_parents.add(blocker, dep.parent)
5010                         return 1
5011                 dep_pkg, existing_node = self._select_package(dep.root, dep.atom,
5012                         onlydeps=dep.onlydeps)
5013                 if not dep_pkg:
5014                         if dep.priority.optional:
5015                                 # This could be an unnecessary build-time dep
5016                                 # pulled in by --with-bdeps=y.
5017                                 return 1
5018                         if allow_unsatisfied:
5019                                 self._unsatisfied_deps.append(dep)
5020                                 return 1
5021                         self._unsatisfied_deps_for_display.append(
5022                                 ((dep.root, dep.atom), {"myparent":dep.parent}))
5023                         return 0
5024                 # In some cases, dep_check will return deps that shouldn't
5025                 # be processed any further, so they are identified and
5026                 # discarded here. Try to discard as few as possible since
5027                 # discarded dependencies reduce the amount of information
5028                 # available for optimization of merge order.
5029                 if dep.priority.satisfied and \
5030                         not dep_pkg.installed and \
5031                         not (existing_node or empty or deep or update):
5032                         myarg = None
5033                         if dep.root == self.target_root:
5034                                 try:
5035                                         myarg = self._iter_atoms_for_pkg(dep_pkg).next()
5036                                 except StopIteration:
5037                                         pass
5038                                 except portage.exception.InvalidDependString:
5039                                         if not dep_pkg.installed:
5040                                                 # This shouldn't happen since the package
5041                                                 # should have been masked.
5042                                                 raise
5043                         if not myarg:
5044                                 self._ignored_deps.append(dep)
5045                                 return 1
5046
5047                 if not self._add_pkg(dep_pkg, dep):
5048                         return 0
5049                 return 1
5050
5051         def _add_pkg(self, pkg, dep):
5052                 myparent = None
5053                 priority = None
5054                 depth = 0
5055                 if dep is None:
5056                         dep = Dependency()
5057                 else:
5058                         myparent = dep.parent
5059                         priority = dep.priority
5060                         depth = dep.depth
5061                 if priority is None:
5062                         priority = DepPriority()
5063                 """
5064                 Fills the digraph with nodes comprised of packages to merge.
5065                 mybigkey is the package spec of the package to merge.
5066                 myparent is the package depending on mybigkey ( or None )
5067                 addme = Should we add this package to the digraph or are we just looking at its deps?
5068                         Think --onlydeps, we need to ignore packages in that case.
5069                 #stuff to add:
5070                 #SLOT-aware emerge
5071                 #IUSE-aware emerge -> USE DEP aware depgraph
5072                 #"no downgrade" emerge
5073                 """
5074                 # Ensure that the dependencies of the same package
5075                 # are never processed more than once.
5076                 previously_added = pkg in self.digraph
5077
5078                 # select the correct /var database that we'll be checking against
5079                 vardbapi = self.trees[pkg.root]["vartree"].dbapi
5080                 pkgsettings = self.pkgsettings[pkg.root]
5081
5082                 arg_atoms = None
5083                 if True:
5084                         try:
5085                                 arg_atoms = list(self._iter_atoms_for_pkg(pkg))
5086                         except portage.exception.InvalidDependString, e:
5087                                 if not pkg.installed:
5088                                         show_invalid_depstring_notice(
5089                                                 pkg, pkg.metadata["PROVIDE"], str(e))
5090                                         return 0
5091                                 del e
5092
5093                 if not pkg.onlydeps:
5094                         if not pkg.installed and \
5095                                 "empty" not in self.myparams and \
5096                                 vardbapi.match(pkg.slot_atom):
5097                                 # Increase the priority of dependencies on packages that
5098                                 # are being rebuilt. This optimizes merge order so that
5099                                 # dependencies are rebuilt/updated as soon as possible,
5100                                 # which is needed especially when emerge is called by
5101                                 # revdep-rebuild since dependencies may be affected by ABI
5102                                 # breakage that has rendered them useless. Don't adjust
5103                                 # priority here when in "empty" mode since all packages
5104                                 # are being merged in that case.
5105                                 priority.rebuild = True
5106
5107                         existing_node = self._slot_pkg_map[pkg.root].get(pkg.slot_atom)
5108                         slot_collision = False
5109                         if existing_node:
5110                                 existing_node_matches = pkg.cpv == existing_node.cpv
5111                                 if existing_node_matches and \
5112                                         pkg != existing_node and \
5113                                         dep.atom is not None:
5114                                         # Use package set for matching since it will match via
5115                                         # PROVIDE when necessary, while match_from_list does not.
5116                                         atom_set = InternalPackageSet(initial_atoms=[dep.atom])
5117                                         if not atom_set.findAtomForPackage(existing_node):
5118                                                 existing_node_matches = False
5119                                 if existing_node_matches:
5120                                         # The existing node can be reused.
5121                                         if arg_atoms:
5122                                                 for parent_atom in arg_atoms:
5123                                                         parent, atom = parent_atom
5124                                                         self.digraph.add(existing_node, parent,
5125                                                                 priority=priority)
5126                                                         self._add_parent_atom(existing_node, parent_atom)
5127                                         # If a direct circular dependency is not an unsatisfied
5128                                         # buildtime dependency then drop it here since otherwise
5129                                         # it can skew the merge order calculation in an unwanted
5130                                         # way.
5131                                         if existing_node != myparent or \
5132                                                 (priority.buildtime and not priority.satisfied):
5133                                                 self.digraph.addnode(existing_node, myparent,
5134                                                         priority=priority)
5135                                                 if dep.atom is not None and dep.parent is not None:
5136                                                         self._add_parent_atom(existing_node,
5137                                                                 (dep.parent, dep.atom))
5138                                         return 1
5139                                 else:
5140
5141                                         # A slot collision has occurred.  Sometimes this coincides
5142                                         # with unresolvable blockers, so the slot collision will be
5143                                         # shown later if there are no unresolvable blockers.
5144                                         self._add_slot_conflict(pkg)
5145                                         slot_collision = True
5146
5147                         if slot_collision:
5148                                 # Now add this node to the graph so that self.display()
5149                                 # can show use flags and --tree output.  This node is
5150                                 # only being partially added to the graph.  It must not be
5151                                 # allowed to interfere with the other nodes that have been
5152                                 # added.  Do not overwrite data for existing nodes in
5153                                 # self.mydbapi since that data will be used for blocker
5154                                 # validation.
5155                                 # Even though the graph is now invalid, continue to process
5156                                 # dependencies so that things like --fetchonly can still
5157                                 # function despite collisions.
5158                                 pass
5159                         elif not previously_added:
5160                                 self._slot_pkg_map[pkg.root][pkg.slot_atom] = pkg
5161                                 self.mydbapi[pkg.root].cpv_inject(pkg)
5162                                 self._filtered_trees[pkg.root]["porttree"].dbapi._clear_cache()
5163
5164                         if not pkg.installed:
5165                                 # Allow this package to satisfy old-style virtuals in case it
5166                                 # doesn't already. Any pre-existing providers will be preferred
5167                                 # over this one.
5168                                 try:
5169                                         pkgsettings.setinst(pkg.cpv, pkg.metadata)
5170                                         # For consistency, also update the global virtuals.
5171                                         settings = self.roots[pkg.root].settings
5172                                         settings.unlock()
5173                                         settings.setinst(pkg.cpv, pkg.metadata)
5174                                         settings.lock()
5175                                 except portage.exception.InvalidDependString, e:
5176                                         show_invalid_depstring_notice(
5177                                                 pkg, pkg.metadata["PROVIDE"], str(e))
5178                                         del e
5179                                         return 0
5180
5181                 if arg_atoms:
5182                         self._set_nodes.add(pkg)
5183
5184                 # Do this even when addme is False (--onlydeps) so that the
5185                 # parent/child relationship is always known in case
5186                 # self._show_slot_collision_notice() needs to be called later.
5187                 self.digraph.add(pkg, myparent, priority=priority)
5188                 if dep.atom is not None and dep.parent is not None:
5189                         self._add_parent_atom(pkg, (dep.parent, dep.atom))
5190
5191                 if arg_atoms:
5192                         for parent_atom in arg_atoms:
5193                                 parent, atom = parent_atom
5194                                 self.digraph.add(pkg, parent, priority=priority)
5195                                 self._add_parent_atom(pkg, parent_atom)
5196
5197                 """ This section determines whether we go deeper into dependencies or not.
5198                     We want to go deeper on a few occasions:
5199                     Installing package A, we need to make sure package A's deps are met.
5200                     emerge --deep <pkgspec>; we need to recursively check dependencies of pkgspec.
5201                     If we are in --nodeps (no recursion) mode, we obviously only check 1 level of dependencies.
5202                 """
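                     # In short: without "recurse" (--nodeps) we stop here, and for an
                     # already-installed package without "deep" the dependencies are
                     # diverted to self._ignored_deps instead of the live dep stack.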
5203                 dep_stack = self._dep_stack
5204                 if "recurse" not in self.myparams:
5205                         return 1
5206                 elif pkg.installed and \
5207                         "deep" not in self.myparams:
5208                         dep_stack = self._ignored_deps
5209
5210                 self.spinner.update()
5211
5212                 if arg_atoms:
5213                         depth = 0
5214                 pkg.depth = depth
5215                 if not previously_added:
5216                         dep_stack.append(pkg)
5217                 return 1
5218
5219         def _add_parent_atom(self, pkg, parent_atom):
5220                 parent_atoms = self._parent_atoms.get(pkg)
5221                 if parent_atoms is None:
5222                         parent_atoms = set()
5223                         self._parent_atoms[pkg] = parent_atoms
5224                 parent_atoms.add(parent_atom)
5225
5226         def _add_slot_conflict(self, pkg):
5227                 self._slot_collision_nodes.add(pkg)
5228                 slot_key = (pkg.slot_atom, pkg.root)
5229                 slot_nodes = self._slot_collision_info.get(slot_key)
5230                 if slot_nodes is None:
5231                         slot_nodes = set()
5232                         slot_nodes.add(self._slot_pkg_map[pkg.root][pkg.slot_atom])
5233                         self._slot_collision_info[slot_key] = slot_nodes
5234                 slot_nodes.add(pkg)
5235
5236         def _add_pkg_deps(self, pkg, allow_unsatisfied=False):
5237
5238                 mytype = pkg.type_name
5239                 myroot = pkg.root
5240                 mykey = pkg.cpv
5241                 metadata = pkg.metadata
5242                 myuse = pkg.use.enabled
5243                 jbigkey = pkg
5244                 depth = pkg.depth + 1
5245                 removal_action = "remove" in self.myparams
5246
5247                 edepend={}
5248                 depkeys = ["DEPEND","RDEPEND","PDEPEND"]
5249                 for k in depkeys:
5250                         edepend[k] = metadata[k]
5251
5252                 if not pkg.built and \
5253                         "--buildpkgonly" in self.myopts and \
5254                         "deep" not in self.myparams and \
5255                         "empty" not in self.myparams:
5256                         edepend["RDEPEND"] = ""
5257                         edepend["PDEPEND"] = ""
5258                 bdeps_optional = False
5259
5260                 if pkg.built and not removal_action:
5261                         if self.myopts.get("--with-bdeps", "n") == "y":
5262                                 # Pull in build time deps as requested, but mark them as
5263                                 # "optional" since they are not strictly required. This allows
5264                                 # more freedom in the merge order calculation for solving
5265                                 # circular dependencies. Don't convert to PDEPEND since that
5266                                 # could make --with-bdeps=y less effective if it is used to
5267                                 # adjust merge order to prevent built_with_use() calls from
5268                                 # failing.
5269                                 bdeps_optional = True
5270                         else:
5271                                 # built packages do not have build time dependencies.
5272                                 edepend["DEPEND"] = ""
5273
5274                 if removal_action and self.myopts.get("--with-bdeps", "y") == "n":
5275                         edepend["DEPEND"] = ""
5276
5277                 bdeps_root = "/"
5278                 root_deps = self.myopts.get("--root-deps")
5279                 if root_deps is not None:
5280                         if root_deps is True:
5281                                 bdeps_root = myroot
5282                         elif root_deps == "rdeps":
5283                                 edepend["DEPEND"] = ""
5284
5285                 deps = (
5286                         (bdeps_root, edepend["DEPEND"],
5287                                 self._priority(buildtime=(not bdeps_optional),
5288                                 optional=bdeps_optional)),
5289                         (myroot, edepend["RDEPEND"], self._priority(runtime=True)),
5290                         (myroot, edepend["PDEPEND"], self._priority(runtime_post=True))
5291                 )
5292
5293                 debug = "--debug" in self.myopts
5294                 strict = mytype != "installed"
5295                 try:
5296                         for dep_root, dep_string, dep_priority in deps:
5297                                 if not dep_string:
5298                                         continue
5299                                 if debug:
5300                                         print
5301                                         print "Parent:   ", jbigkey
5302                                         print "Depstring:", dep_string
5303                                         print "Priority:", dep_priority
5304                                 vardb = self.roots[dep_root].trees["vartree"].dbapi
5305                                 try:
5306                                         selected_atoms = self._select_atoms(dep_root,
5307                                                 dep_string, myuse=myuse, parent=pkg, strict=strict,
5308                                                 priority=dep_priority)
5309                                 except portage.exception.InvalidDependString, e:
5310                                         show_invalid_depstring_notice(jbigkey, dep_string, str(e))
5311                                         return 0
5312                                 if debug:
5313                                         print "Candidates:", selected_atoms
5314
5315                                 for atom in selected_atoms:
5316                                         try:
5317
5318                                                 atom = portage.dep.Atom(atom)
5319
5320                                                 mypriority = dep_priority.copy()
5321                                                 if not atom.blocker and vardb.match(atom):
5322                                                         mypriority.satisfied = True
5323
5324                                                 if not self._add_dep(Dependency(atom=atom,
5325                                                         blocker=atom.blocker, depth=depth, parent=pkg,
5326                                                         priority=mypriority, root=dep_root),
5327                                                         allow_unsatisfied=allow_unsatisfied):
5328                                                         return 0
5329
5330                                         except portage.exception.InvalidAtom, e:
5331                                                 show_invalid_depstring_notice(
5332                                                         pkg, dep_string, str(e))
5333                                                 del e
5334                                                 if not pkg.installed:
5335                                                         return 0
5336
5337                                 if debug:
5338                                         print "Exiting...", jbigkey
5339                 except portage.exception.AmbiguousPackageName, e:
5340                         pkgs = e.args[0]
5341                         portage.writemsg("\n\n!!! An atom in the dependencies " + \
5342                                 "is not fully-qualified. Multiple matches:\n\n", noiselevel=-1)
5343                         for cpv in pkgs:
5344                                 portage.writemsg("    %s\n" % cpv, noiselevel=-1)
5345                         portage.writemsg("\n", noiselevel=-1)
5346                         if mytype == "binary":
5347                                 portage.writemsg(
5348                                         "!!! This binary package cannot be installed: '%s'\n" % \
5349                                         mykey, noiselevel=-1)
5350                         elif mytype == "ebuild":
5351                                 portdb = self.roots[myroot].trees["porttree"].dbapi
5352                                 myebuild, mylocation = portdb.findname2(mykey)
5353                                 portage.writemsg("!!! This ebuild cannot be installed: " + \
5354                                         "'%s'\n" % myebuild, noiselevel=-1)
5355                         portage.writemsg("!!! Please notify the package maintainer " + \
5356                                 "that atoms must be fully-qualified.\n", noiselevel=-1)
5357                         return 0
5358                 return 1
5359
5360         def _priority(self, **kwargs):
5361                 if "remove" in self.myparams:
5362                         priority_constructor = UnmergeDepPriority
5363                 else:
5364                         priority_constructor = DepPriority
5365                 return priority_constructor(**kwargs)
5366
5367         def _dep_expand(self, root_config, atom_without_category):
5368                 """
5369                 @param root_config: a root config instance
5370                 @type root_config: RootConfig
5371                 @param atom_without_category: an atom without a category component
5372                 @type atom_without_category: String
5373                 @rtype: list
5374                 @returns: a list of atoms containing categories (possibly empty)
5375                 """
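                     # For instance (hypothetical tree contents), "portage-utils" might
                     # expand to ["app-portage/portage-utils"], while an ambiguous name
                     # could yield several category candidates for the caller to
                     # disambiguate.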
5376                 null_cp = portage.dep_getkey(insert_category_into_atom(
5377                         atom_without_category, "null"))
5378                 cat, atom_pn = portage.catsplit(null_cp)
5379
5380                 dbs = self._filtered_trees[root_config.root]["dbs"]
5381                 categories = set()
5382                 for db, pkg_type, built, installed, db_keys in dbs:
5383                         for cat in db.categories:
5384                                 if db.cp_list("%s/%s" % (cat, atom_pn)):
5385                                         categories.add(cat)
5386
5387                 deps = []
5388                 for cat in categories:
5389                         deps.append(insert_category_into_atom(
5390                                 atom_without_category, cat))
5391                 return deps
5392
5393         def _have_new_virt(self, root, atom_cp):
5394                 ret = False
5395                 for db, pkg_type, built, installed, db_keys in \
5396                         self._filtered_trees[root]["dbs"]:
5397                         if db.cp_list(atom_cp):
5398                                 ret = True
5399                                 break
5400                 return ret
5401
5402         def _iter_atoms_for_pkg(self, pkg):
5403                 # TODO: add multiple $ROOT support
5404                 if pkg.root != self.target_root:
5405                         return
5406                 atom_arg_map = self._atom_arg_map
5407                 root_config = self.roots[pkg.root]
5408                 for atom in self._set_atoms.iterAtomsForPackage(pkg):
5409                         atom_cp = portage.dep_getkey(atom)
5410                         if atom_cp != pkg.cp and \
5411                                 self._have_new_virt(pkg.root, atom_cp):
5412                                 continue
5413                         visible_pkgs = root_config.visible_pkgs.match_pkgs(atom)
5414                         visible_pkgs.reverse() # descending order
5415                         higher_slot = None
5416                         for visible_pkg in visible_pkgs:
5417                                 if visible_pkg.cp != atom_cp:
5418                                         continue
5419                                 if pkg >= visible_pkg:
5420                                         # This is descending order, and we're not
5421                                         # interested in any versions <= pkg given.
5422                                         break
5423                                 if pkg.slot_atom != visible_pkg.slot_atom:
5424                                         higher_slot = visible_pkg
5425                                         break
5426                         if higher_slot is not None:
5427                                 continue
5428                         for arg in atom_arg_map[(atom, pkg.root)]:
5429                                 if isinstance(arg, PackageArg) and \
5430                                         arg.package != pkg:
5431                                         continue
5432                                 yield arg, atom
5433
5434         def select_files(self, myfiles):
5435                 """Given a list of .tbz2s, .ebuilds, sets, and deps, create the
5436                 appropriate depgraph and return a favorite list."""
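                     # Each kind of argument is handled below (roughly): *.tbz2 files
                     # become binary PackageArgs, *.ebuild files become ebuild
                     # PackageArgs, absolute paths are resolved to the packages that
                     # own them, "@set" names become SetArgs, and everything else is
                     # treated as an atom and becomes an AtomArg.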
5437                 debug = "--debug" in self.myopts
5438                 root_config = self.roots[self.target_root]
5439                 sets = root_config.sets
5440                 getSetAtoms = root_config.setconfig.getSetAtoms
5441                 myfavorites=[]
5442                 myroot = self.target_root
5443                 dbs = self._filtered_trees[myroot]["dbs"]
5444                 vardb = self.trees[myroot]["vartree"].dbapi
5445                 real_vardb = self._trees_orig[myroot]["vartree"].dbapi
5446                 portdb = self.trees[myroot]["porttree"].dbapi
5447                 bindb = self.trees[myroot]["bintree"].dbapi
5448                 pkgsettings = self.pkgsettings[myroot]
5449                 args = []
5450                 onlydeps = "--onlydeps" in self.myopts
5451                 lookup_owners = []
5452                 for x in myfiles:
5453                         ext = os.path.splitext(x)[1]
5454                         if ext==".tbz2":
5455                                 if not os.path.exists(x):
5456                                         if os.path.exists(
5457                                                 os.path.join(pkgsettings["PKGDIR"], "All", x)):
5458                                                 x = os.path.join(pkgsettings["PKGDIR"], "All", x)
5459                                         elif os.path.exists(
5460                                                 os.path.join(pkgsettings["PKGDIR"], x)):
5461                                                 x = os.path.join(pkgsettings["PKGDIR"], x)
5462                                         else:
5463                                                 print "\n\n!!! Binary package '"+str(x)+"' does not exist."
5464                                                 print "!!! Please ensure the tbz2 exists as specified.\n"
5465                                                 return 0, myfavorites
5466                                 mytbz2=portage.xpak.tbz2(x)
5467                                 mykey=mytbz2.getelements("CATEGORY")[0]+"/"+os.path.splitext(os.path.basename(x))[0]
5468                                 if os.path.realpath(x) != \
5469                                         os.path.realpath(self.trees[myroot]["bintree"].getname(mykey)):
5470                                         print colorize("BAD", "\n*** You need to adjust PKGDIR to emerge this package.\n")
5471                                         return 0, myfavorites
5472                                 db_keys = list(bindb._aux_cache_keys)
5473                                 metadata = izip(db_keys, bindb.aux_get(mykey, db_keys))
5474                                 pkg = Package(type_name="binary", root_config=root_config,
5475                                         cpv=mykey, built=True, metadata=metadata,
5476                                         onlydeps=onlydeps)
5477                                 self._pkg_cache[pkg] = pkg
5478                                 args.append(PackageArg(arg=x, package=pkg,
5479                                         root_config=root_config))
5480                         elif ext==".ebuild":
5481                                 ebuild_path = portage.util.normalize_path(os.path.abspath(x))
5482                                 pkgdir = os.path.dirname(ebuild_path)
5483                                 tree_root = os.path.dirname(os.path.dirname(pkgdir))
5484                                 cp = pkgdir[len(tree_root)+1:]
5485                                 e = portage.exception.PackageNotFound(
5486                                         ("%s is not in a valid portage tree " + \
5487                                         "hierarchy or does not exist") % x)
5488                                 if not portage.isvalidatom(cp):
5489                                         raise e
5490                                 cat = portage.catsplit(cp)[0]
5491                                 mykey = cat + "/" + os.path.basename(ebuild_path[:-7])
5492                                 if not portage.isvalidatom("="+mykey):
5493                                         raise e
5494                                 ebuild_path = portdb.findname(mykey)
5495                                 if ebuild_path:
5496                                         if ebuild_path != os.path.join(os.path.realpath(tree_root),
5497                                                 cp, os.path.basename(ebuild_path)):
5498                                                 print colorize("BAD", "\n*** You need to adjust PORTDIR or PORTDIR_OVERLAY to emerge this package.\n")
5499                                                 return 0, myfavorites
5500                                         if mykey not in portdb.xmatch(
5501                                                 "match-visible", portage.dep_getkey(mykey)):
5502                                                 print colorize("BAD", "\n*** You are emerging a masked package. It is MUCH better to use")
5503                                                 print colorize("BAD", "*** /etc/portage/package.* to accomplish this. See portage(5) man")
5504                                                 print colorize("BAD", "*** page for details.")
5505                                                 countdown(int(self.settings["EMERGE_WARNING_DELAY"]),
5506                                                         "Continuing...")
5507                                 else:
5508                                         raise portage.exception.PackageNotFound(
5509                                                 "%s is not in a valid portage tree hierarchy or does not exist" % x)
5510                                 db_keys = list(portdb._aux_cache_keys)
5511                                 metadata = izip(db_keys, portdb.aux_get(mykey, db_keys))
5512                                 pkg = Package(type_name="ebuild", root_config=root_config,
5513                                         cpv=mykey, metadata=metadata, onlydeps=onlydeps)
5514                                 pkgsettings.setcpv(pkg)
5515                                 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
5516                                 pkg.metadata['CHOST'] = pkgsettings.get('CHOST', '')
5517                                 self._pkg_cache[pkg] = pkg
5518                                 args.append(PackageArg(arg=x, package=pkg,
5519                                         root_config=root_config))
5520                         elif x.startswith(os.path.sep):
5521                                 if not x.startswith(myroot):
5522                                         portage.writemsg(("\n\n!!! '%s' does not start with" + \
5523                                                 " $ROOT.\n") % x, noiselevel=-1)
5524                                         return 0, []
5525                                 # Queue these up since it's most efficient to handle
5526                                 # multiple files in a single iter_owners() call.
5527                                 lookup_owners.append(x)
5528                         else:
5529                                 if x in ("system", "world"):
5530                                         x = SETPREFIX + x
5531                                 if x.startswith(SETPREFIX):
5532                                         s = x[len(SETPREFIX):]
5533                                         if s not in sets:
5534                                                 raise portage.exception.PackageSetNotFound(s)
5535                                         if s in self._sets:
5536                                                 continue
5537                                         # Recursively expand sets so that containment tests in
5538                                         # self._get_parent_sets() properly match atoms in nested
5539                                         # sets (like if world contains system).
5540                                         expanded_set = InternalPackageSet(
5541                                                 initial_atoms=getSetAtoms(s))
5542                                         self._sets[s] = expanded_set
5543                                         args.append(SetArg(arg=x, set=expanded_set,
5544                                                 root_config=root_config))
5545                                         continue
5546                                 if not is_valid_package_atom(x):
5547                                         portage.writemsg("\n\n!!! '%s' is not a valid package atom.\n" % x,
5548                                                 noiselevel=-1)
5549                                         portage.writemsg("!!! Please check ebuild(5) for full details.\n")
5550                                         portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
5551                                         return (0,[])
5552                                 # Don't expand categories or old-style virtuals here unless
5553                                 # necessary. Expansion of old-style virtuals here causes at
5554                                 # least the following problems:
5555                                 #   1) It's more difficult to determine which set(s) an atom
5556                                 #      came from, if any.
5557                                 #   2) It takes away freedom from the resolver to choose other
5558                                 #      possible expansions when necessary.
5559                                 if "/" in x:
5560                                         args.append(AtomArg(arg=x, atom=x,
5561                                                 root_config=root_config))
5562                                         continue
5563                                 expanded_atoms = self._dep_expand(root_config, x)
5564                                 installed_cp_set = set()
5565                                 for atom in expanded_atoms:
5566                                         atom_cp = portage.dep_getkey(atom)
5567                                         if vardb.cp_list(atom_cp):
5568                                                 installed_cp_set.add(atom_cp)
5569                                 if len(expanded_atoms) > 1 and len(installed_cp_set) == 1:
5570                                         installed_cp = iter(installed_cp_set).next()
5571                                         expanded_atoms = [atom for atom in expanded_atoms \
5572                                                 if portage.dep_getkey(atom) == installed_cp]
5573
5574                                 if len(expanded_atoms) > 1:
5575                                         print
5576                                         print
5577                                         ambiguous_package_name(x, expanded_atoms, root_config,
5578                                                 self.spinner, self.myopts)
5579                                         return False, myfavorites
5580                                 if expanded_atoms:
5581                                         atom = expanded_atoms[0]
5582                                 else:
5583                                         null_atom = insert_category_into_atom(x, "null")
5584                                         null_cp = portage.dep_getkey(null_atom)
5585                                         cat, atom_pn = portage.catsplit(null_cp)
5586                                         virts_p = root_config.settings.get_virts_p().get(atom_pn)
5587                                         if virts_p:
5588                                                 # Allow the depgraph to choose which virtual.
5589                                                 atom = insert_category_into_atom(x, "virtual")
5590                                         else:
5591                                                 atom = insert_category_into_atom(x, "null")
5592
5593                                 args.append(AtomArg(arg=x, atom=atom,
5594                                         root_config=root_config))
5595
5596                 if lookup_owners:
5597                         relative_paths = []
5598                         search_for_multiple = False
5599                         if len(lookup_owners) > 1:
5600                                 search_for_multiple = True
5601
5602                         for x in lookup_owners:
5603                                 if not search_for_multiple and os.path.isdir(x):
5604                                         search_for_multiple = True
5605                                 relative_paths.append(x[len(myroot):])
5606
5607                         owners = set()
5608                         for pkg, relative_path in \
5609                                 real_vardb._owners.iter_owners(relative_paths):
5610                                 owners.add(pkg.mycpv)
5611                                 if not search_for_multiple:
5612                                         break
5613
5614                         if not owners:
5615                                 portage.writemsg(("\n\n!!! '%s' is not claimed " + \
5616                                         "by any package.\n") % lookup_owners[0], noiselevel=-1)
5617                                 return 0, []
5618
5619                         for cpv in owners:
5620                                 slot = vardb.aux_get(cpv, ["SLOT"])[0]
5621                                 if not slot:
5622                                         # portage now masks packages with missing slot, but it's
5623                                         # possible that one was installed by an older version
5624                                         atom = portage.cpv_getkey(cpv)
5625                                 else:
5626                                         atom = "%s:%s" % (portage.cpv_getkey(cpv), slot)
5627                                 args.append(AtomArg(arg=atom, atom=atom,
5628                                         root_config=root_config))
5629
5630                 if "--update" in self.myopts:
5631                         # In some cases, the greedy slots behavior can pull in a slot that
5632                         # the user would want to uninstall due to it being blocked by a
5633                         # newer version in a different slot. Therefore, it's necessary to
5634                         # detect and discard any that should be uninstalled. Each time
5635                         # that arguments are updated, package selections are repeated in
5636                         # order to ensure consistency with the current arguments:
5637                         #
5638                         #  1) Initialize args
5639                         #  2) Select packages and generate initial greedy atoms
5640                         #  3) Update args with greedy atoms
5641                         #  4) Select packages and generate greedy atoms again, while
5642                         #     accounting for any blockers between selected packages
5643                         #  5) Update args with revised greedy atoms
5644
5645                         self._set_args(args)
5646                         greedy_args = []
5647                         for arg in args:
5648                                 greedy_args.append(arg)
5649                                 if not isinstance(arg, AtomArg):
5650                                         continue
5651                                 for atom in self._greedy_slots(arg.root_config, arg.atom):
5652                                         greedy_args.append(
5653                                                 AtomArg(arg=arg.arg, atom=atom,
5654                                                         root_config=arg.root_config))
5655
5656                         self._set_args(greedy_args)
5657                         del greedy_args
5658
5659                         # Revise greedy atoms, accounting for any blockers
5660                         # between selected packages.
5661                         revised_greedy_args = []
5662                         for arg in args:
5663                                 revised_greedy_args.append(arg)
5664                                 if not isinstance(arg, AtomArg):
5665                                         continue
5666                                 for atom in self._greedy_slots(arg.root_config, arg.atom,
5667                                         blocker_lookahead=True):
5668                                         revised_greedy_args.append(
5669                                                 AtomArg(arg=arg.arg, atom=atom,
5670                                                         root_config=arg.root_config))
5671                         args = revised_greedy_args
5672                         del revised_greedy_args
5673
5674                 self._set_args(args)
5675
5676                 myfavorites = set(myfavorites)
5677                 for arg in args:
5678                         if isinstance(arg, (AtomArg, PackageArg)):
5679                                 myfavorites.add(arg.atom)
5680                         elif isinstance(arg, SetArg):
5681                                 myfavorites.add(arg.arg)
5682                 myfavorites = list(myfavorites)
5683
5684                 pprovideddict = pkgsettings.pprovideddict
5685                 if debug:
5686                         portage.writemsg("\n", noiselevel=-1)
5687                 # Order needs to be preserved since a feature of --nodeps
5688                 # is to allow the user to force a specific merge order.
5689                 args.reverse()
5690                 while args:
5691                         arg = args.pop()
5692                         for atom in arg.set:
5693                                 self.spinner.update()
5694                                 dep = Dependency(atom=atom, onlydeps=onlydeps,
5695                                         root=myroot, parent=arg)
5696                                 atom_cp = portage.dep_getkey(atom)
5697                                 try:
5698                                         pprovided = pprovideddict.get(portage.dep_getkey(atom))
5699                                         if pprovided and portage.match_from_list(atom, pprovided):
5700                                                 # A provided package has been specified on the command line.
5701                                                 self._pprovided_args.append((arg, atom))
5702                                                 continue
5703                                         if isinstance(arg, PackageArg):
5704                                                 if not self._add_pkg(arg.package, dep) or \
5705                                                         not self._create_graph():
5706                                                         sys.stderr.write(("\n\n!!! Problem resolving " + \
5707                                                                 "dependencies for %s\n") % arg.arg)
5708                                                         return 0, myfavorites
5709                                                 continue
5710                                         if debug:
5711                                                 portage.writemsg("      Arg: %s\n     Atom: %s\n" % \
5712                                                         (arg, atom), noiselevel=-1)
5713                                         pkg, existing_node = self._select_package(
5714                                                 myroot, atom, onlydeps=onlydeps)
5715                                         if not pkg:
5716                                                 if not (isinstance(arg, SetArg) and \
5717                                                         arg.name in ("system", "world")):
5718                                                         self._unsatisfied_deps_for_display.append(
5719                                                                 ((myroot, atom), {}))
5720                                                         return 0, myfavorites
5721                                                 self._missing_args.append((arg, atom))
5722                                                 continue
5723                                         if atom_cp != pkg.cp:
5724                                                 # For old-style virtuals, we need to repeat the
5725                                                 # package.provided check against the selected package.
5726                                                 expanded_atom = atom.replace(atom_cp, pkg.cp)
5727                                                 pprovided = pprovideddict.get(pkg.cp)
5728                                                 if pprovided and \
5729                                                         portage.match_from_list(expanded_atom, pprovided):
5730                                                         # A provided package has been
5731                                                         # specified on the command line.
5732                                                         self._pprovided_args.append((arg, atom))
5733                                                         continue
5734                                         if pkg.installed and "selective" not in self.myparams:
5735                                                 self._unsatisfied_deps_for_display.append(
5736                                                         ((myroot, atom), {}))
5737                                                 # Previous behavior was to bail out in this case, but
5738                                                 # since the dep is satisfied by the installed package,
5739                                                 # it's more friendly to continue building the graph
5740                                                 # and just show a warning message. Therefore, only bail
5741                                                 # out here if the atom is not from either the system or
5742                                                 # world set.
5743                                                 if not (isinstance(arg, SetArg) and \
5744                                                         arg.name in ("system", "world")):
5745                                                         return 0, myfavorites
5746
5747                                         # Add the selected package to the graph as soon as possible
5748                                         # so that later dep_check() calls can use it as feedback
5749                                         # for making more consistent atom selections.
5750                                         if not self._add_pkg(pkg, dep):
5751                                                 if isinstance(arg, SetArg):
5752                                                         sys.stderr.write(("\n\n!!! Problem resolving " + \
5753                                                                 "dependencies for %s from %s\n") % \
5754                                                                 (atom, arg.arg))
5755                                                 else:
5756                                                         sys.stderr.write(("\n\n!!! Problem resolving " + \
5757                                                                 "dependencies for %s\n") % atom)
5758                                                 return 0, myfavorites
5759
5760                                 except portage.exception.MissingSignature, e:
5761                                         portage.writemsg("\n\n!!! A missing gpg signature is preventing portage from calculating the\n")
5762                                         portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
5763                                         portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
5764                                         portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
5765                                         portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
5766                                         return 0, myfavorites
5767                                 except portage.exception.InvalidSignature, e:
5768                                         portage.writemsg("\n\n!!! An invalid gpg signature is preventing portage from calculating the\n")
5769                                         portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
5770                                         portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
5771                                         portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
5772                                         portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
5773                                         return 0, myfavorites
5774                                 except SystemExit, e:
5775                                         raise # Needed else can't exit
5776                                 except Exception, e:
5777                                         print >> sys.stderr, "\n\n!!! Problem in '%s' dependencies." % atom
5778                                         print >> sys.stderr, "!!!", str(e), getattr(e, "__module__", None)
5779                                         raise
5780
5781                 # Now that the root packages have been added to the graph,
5782                 # process the dependencies.
5783                 if not self._create_graph():
5784                         return 0, myfavorites
5785
5786                 missing=0
5787                 if "--usepkgonly" in self.myopts:
5788                         for xs in self.digraph.all_nodes():
5789                                 if not isinstance(xs, Package):
5790                                         continue
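                                     # Package instances are indexed like tuples here:
                                     # xs[0] is the type name, xs[2] the cpv and xs[3]
                                     # the requested operation.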
5791                                 if len(xs) >= 4 and xs[0] != "binary" and xs[3] == "merge":
5792                                         if missing == 0:
5793                                                 print
5794                                         missing += 1
5795                                         print "Missing binary for:",xs[2]
5796
5797                 try:
5798                         self.altlist()
5799                 except self._unknown_internal_error:
5800                         return False, myfavorites
5801
5802                 # Return True here unless binaries are missing.
5803                 return (not missing, myfavorites)
5804
5805         def _set_args(self, args):
5806                 """
5807                 Create the "args" package set from atoms and packages given as
5808                 arguments. This method can be called multiple times if necessary.
5809                 The package selection cache is automatically invalidated, since
5810                 arguments influence package selections.
5811                 """
5812                 args_set = self._sets["args"]
5813                 args_set.clear()
5814                 for arg in args:
5815                         if not isinstance(arg, (AtomArg, PackageArg)):
5816                                 continue
5817                         atom = arg.atom
5818                         if atom in args_set:
5819                                 continue
5820                         args_set.add(atom)
5821
5822                 self._set_atoms.clear()
5823                 self._set_atoms.update(chain(*self._sets.itervalues()))
5824                 atom_arg_map = self._atom_arg_map
5825                 atom_arg_map.clear()
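                     # Map each (atom, root) pair to the list of argument
                     # objects that pulled that atom in.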
5826                 for arg in args:
5827                         for atom in arg.set:
5828                                 atom_key = (atom, arg.root_config.root)
5829                                 refs = atom_arg_map.get(atom_key)
5830                                 if refs is None:
5831                                         refs = []
5832                                         atom_arg_map[atom_key] = refs
5833                                 if arg not in refs:
5834                                         refs.append(arg)
5835
5836                 # Invalidate the package selection cache, since
5837                 # arguments influence package selections.
5838                 self._highest_pkg_cache.clear()
5839                 for trees in self._filtered_trees.itervalues():
5840                         trees["porttree"].dbapi._clear_cache()
5841
5842         def _greedy_slots(self, root_config, atom, blocker_lookahead=False):
5843                 """
5844                 Return a list of slot atoms corresponding to installed slots that
5845                 differ from the slot of the highest visible match. When
5846                 blocker_lookahead is True, slot atoms that would trigger a blocker
5847                 conflict are automatically discarded, potentially allowing automatic
5848                 uninstallation of older slots when appropriate.
5849                 """
5850                 highest_pkg, in_graph = self._select_package(root_config.root, atom)
5851                 if highest_pkg is None:
5852                         return []
5853                 vardb = root_config.trees["vartree"].dbapi
5854                 slots = set()
5855                 for cpv in vardb.match(atom):
5856                         # don't mix new virtuals with old virtuals
5857                         if portage.cpv_getkey(cpv) == highest_pkg.cp:
5858                                 slots.add(vardb.aux_get(cpv, ["SLOT"])[0])
5859
5860                 slots.add(highest_pkg.metadata["SLOT"])
5861                 if len(slots) == 1:
5862                         return []
5863                 greedy_pkgs = []
5864                 slots.remove(highest_pkg.metadata["SLOT"])
5865                 while slots:
5866                         slot = slots.pop()
5867                         slot_atom = portage.dep.Atom("%s:%s" % (highest_pkg.cp, slot))
5868                         pkg, in_graph = self._select_package(root_config.root, slot_atom)
5869                         if pkg is not None and \
5870                                 pkg.cp == highest_pkg.cp and pkg < highest_pkg:
5871                                 greedy_pkgs.append(pkg)
5872                 if not greedy_pkgs:
5873                         return []
5874                 if not blocker_lookahead:
5875                         return [pkg.slot_atom for pkg in greedy_pkgs]
5876
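                     # For blocker lookahead, collect the blocker atoms declared in
                     # each candidate's DEPEND, PDEPEND and RDEPEND so that slots which
                     # block (or are blocked by) the highest visible match can be
                     # discarded.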
5877                 blockers = {}
5878                 blocker_dep_keys = ["DEPEND", "PDEPEND", "RDEPEND"]
5879                 for pkg in greedy_pkgs + [highest_pkg]:
5880                         dep_str = " ".join(pkg.metadata[k] for k in blocker_dep_keys)
5881                         try:
5882                                 atoms = self._select_atoms(
5883                                         pkg.root, dep_str, pkg.use.enabled,
5884                                         parent=pkg, strict=True)
5885                         except portage.exception.InvalidDependString:
5886                                 continue
5887                         blocker_atoms = (x for x in atoms if x.blocker)
5888                         blockers[pkg] = InternalPackageSet(initial_atoms=blocker_atoms)
5889
5890                 if highest_pkg not in blockers:
5891                         return []
5892
5893                 # filter packages with invalid deps
5894                 greedy_pkgs = [pkg for pkg in greedy_pkgs if pkg in blockers]
5895
5896                 # filter packages that conflict with highest_pkg
5897                 greedy_pkgs = [pkg for pkg in greedy_pkgs if not \
5898                         (blockers[highest_pkg].findAtomForPackage(pkg) or \
5899                         blockers[pkg].findAtomForPackage(highest_pkg))]
5900
5901                 if not greedy_pkgs:
5902                         return []
5903
5904                 # If two packages conflict, discard the lower version.
5905                 discard_pkgs = set()
5906                 greedy_pkgs.sort(reverse=True)
5907                 for i in xrange(len(greedy_pkgs) - 1):
5908                         pkg1 = greedy_pkgs[i]
5909                         if pkg1 in discard_pkgs:
5910                                 continue
5911                         for j in xrange(i + 1, len(greedy_pkgs)):
5912                                 pkg2 = greedy_pkgs[j]
5913                                 if pkg2 in discard_pkgs:
5914                                         continue
5915                                 if blockers[pkg1].findAtomForPackage(pkg2) or \
5916                                         blockers[pkg2].findAtomForPackage(pkg1):
5917                                         # pkg1 is the higher version, so discard pkg2.
5918                                         discard_pkgs.add(pkg2)
5919
5920                 return [pkg.slot_atom for pkg in greedy_pkgs \
5921                         if pkg not in discard_pkgs]
5922
5923         def _select_atoms_from_graph(self, *pargs, **kwargs):
5924                 """
5925                 Prefer atoms matching packages that have already been
5926                 added to the graph or those that are installed and have
5927                 not been scheduled for replacement.
5928                 """
5929                 kwargs["trees"] = self._graph_trees
5930                 return self._select_atoms_highest_available(*pargs, **kwargs)
5931
5932         def _select_atoms_highest_available(self, root, depstring,
5933                 myuse=None, parent=None, strict=True, trees=None, priority=None):
5934                 """This will raise InvalidDependString if necessary. If trees is
5935                 None then self._filtered_trees is used."""
5936                 pkgsettings = self.pkgsettings[root]
5937                 if trees is None:
5938                         trees = self._filtered_trees
5939                 if not getattr(priority, "buildtime", False):
5940                         # The parent should only be passed to dep_check() for buildtime
5941                         # dependencies since that's the only case when it's appropriate
5942                         # to trigger the circular dependency avoidance code which uses it.
5943                         # It's important not to trigger the same circular dependency
5944                         # avoidance code for runtime dependencies since it's not needed
5945                         # and it can promote an incorrect package choice.
5946                         parent = None
5947                 if True:
5948                         try:
5949                                 if parent is not None:
5950                                         trees[root]["parent"] = parent
5951                                 if not strict:
5952                                         portage.dep._dep_check_strict = False
5953                                 mycheck = portage.dep_check(depstring, None,
5954                                         pkgsettings, myuse=myuse,
5955                                         myroot=root, trees=trees)
5956                         finally:
5957                                 if parent is not None:
5958                                         trees[root].pop("parent")
5959                                 portage.dep._dep_check_strict = True
5960                         if not mycheck[0]:
5961                                 raise portage.exception.InvalidDependString(mycheck[1])
5962                         selected_atoms = mycheck[1]
5963                 return selected_atoms
5964
5965         def _show_unsatisfied_dep(self, root, atom, myparent=None, arg=None):
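                 """
                 Display the reasons why the given atom could not be satisfied:
                 packages that merely need USE changes or lack the required IUSE
                 flags, masked packages, and finally the chain of parent nodes
                 (or the command line argument) that pulled the atom into the
                 graph.
                 """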
5966                 atom = portage.dep.Atom(atom)
5967                 atom_set = InternalPackageSet(initial_atoms=(atom,))
5968                 atom_without_use = atom
5969                 if atom.use:
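                         # remove_slot() strips both the slot and any USE
                         # conditionals from the atom, so the slot is re-appended
                         # here if the atom had one.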
5970                         atom_without_use = portage.dep.remove_slot(atom)
5971                         if atom.slot:
5972                                 atom_without_use += ":" + atom.slot
5973                         atom_without_use = portage.dep.Atom(atom_without_use)
5974                 xinfo = '"%s"' % atom
5975                 if arg:
5976                         xinfo = '"%s"' % arg
5977                 # Discard null/ from failed cpv_expand category expansion.
5978                 xinfo = xinfo.replace("null/", "")
5979                 masked_packages = []
5980                 missing_use = []
5981                 masked_pkg_instances = set()
5982                 missing_licenses = []
5983                 have_eapi_mask = False
5984                 pkgsettings = self.pkgsettings[root]
5985                 implicit_iuse = pkgsettings._get_implicit_iuse()
5986                 root_config = self.roots[root]
5987                 portdb = self.roots[root].trees["porttree"].dbapi
5988                 dbs = self._filtered_trees[root]["dbs"]
5989                 for db, pkg_type, built, installed, db_keys in dbs:
5990                         if installed:
5991                                 continue
5992                         match = db.match
5993                         if hasattr(db, "xmatch"):
5994                                 cpv_list = db.xmatch("match-all", atom_without_use)
5995                         else:
5996                                 cpv_list = db.match(atom_without_use)
5997                         # descending order
5998                         cpv_list.reverse()
5999                         for cpv in cpv_list:
6000                                 metadata, mreasons = get_mask_info(root_config, cpv,
6001                                         pkgsettings, db, pkg_type, built, installed, db_keys)
6002                                 if metadata is not None:
6003                                         pkg = Package(built=built, cpv=cpv,
6004                                                 installed=installed, metadata=metadata,
6005                                                 root_config=root_config)
6006                                         if pkg.cp != atom.cp:
6007                                                 # A cpv can be returned from dbapi.match() as an
6008                                                 # old-style virtual match even in cases when the
6009                                                 # package does not actually PROVIDE the virtual.
6010                                                 # Filter out any such false matches here.
6011                                                 if not atom_set.findAtomForPackage(pkg):
6012                                                         continue
6013                                         if mreasons:
6014                                                 masked_pkg_instances.add(pkg)
6015                                         if atom.use:
6016                                                 missing_use.append(pkg)
6017                                                 if not mreasons:
6018                                                         continue
6019                                 masked_packages.append(
6020                                         (root_config, pkgsettings, cpv, metadata, mreasons))
6021
6022                 missing_use_reasons = []
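                 # Split the USE-related failures into packages that are simply
                 # missing the required flags in IUSE and packages that only need
                 # USE changes to satisfy the atom.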
6023                 missing_iuse_reasons = []
6024                 for pkg in missing_use:
6025                         use = pkg.use.enabled
6026                         iuse = implicit_iuse.union(re.escape(x) for x in pkg.iuse.all)
6027                         iuse_re = re.compile("^(%s)$" % "|".join(iuse))
6028                         missing_iuse = []
6029                         for x in atom.use.required:
6030                                 if iuse_re.match(x) is None:
6031                                         missing_iuse.append(x)
6032                         mreasons = []
6033                         if missing_iuse:
6034                                 mreasons.append("Missing IUSE: %s" % " ".join(missing_iuse))
6035                                 missing_iuse_reasons.append((pkg, mreasons))
6036                         else:
6037                                 need_enable = sorted(atom.use.enabled.difference(use))
6038                                 need_disable = sorted(atom.use.disabled.intersection(use))
6039                                 if need_enable or need_disable:
6040                                         changes = []
6041                                         changes.extend(colorize("red", "+" + x) \
6042                                                 for x in need_enable)
6043                                         changes.extend(colorize("blue", "-" + x) \
6044                                                 for x in need_disable)
6045                                         mreasons.append("Change USE: %s" % " ".join(changes))
6046                                         missing_use_reasons.append((pkg, mreasons))
6047
6048                 unmasked_use_reasons = [(pkg, mreasons) for (pkg, mreasons) \
6049                         in missing_use_reasons if pkg not in masked_pkg_instances]
6050
6051                 unmasked_iuse_reasons = [(pkg, mreasons) for (pkg, mreasons) \
6052                         in missing_iuse_reasons if pkg not in masked_pkg_instances]
6053
6054                 show_missing_use = False
6055                 if unmasked_use_reasons:
6056                         # Only show the latest version.
6057                         show_missing_use = unmasked_use_reasons[:1]
6058                 elif unmasked_iuse_reasons:
6059                         if missing_use_reasons:
6060                                 # All packages with required IUSE are masked,
6061                                 # so display a normal masking message.
6062                                 pass
6063                         else:
6064                                 show_missing_use = unmasked_iuse_reasons
6065
6066                 if show_missing_use:
6067                         print "\nemerge: there are no ebuilds built with USE flags to satisfy "+green(xinfo)+"."
6068                         print "!!! One of the following packages is required to complete your request:"
6069                         for pkg, mreasons in show_missing_use:
6070                                 print "- "+pkg.cpv+" ("+", ".join(mreasons)+")"
6071
6072                 elif masked_packages:
6073                         print "\n!!! " + \
6074                                 colorize("BAD", "All ebuilds that could satisfy ") + \
6075                                 colorize("INFORM", xinfo) + \
6076                                 colorize("BAD", " have been masked.")
6077                         print "!!! One of the following masked packages is required to complete your request:"
6078                         have_eapi_mask = show_masked_packages(masked_packages)
6079                         if have_eapi_mask:
6080                                 print
6081                                 msg = ("The current version of portage supports " + \
6082                                         "EAPI '%s'. You must upgrade to a newer version" + \
6083                                         " of portage before EAPI masked packages can" + \
6084                                         " be installed.") % portage.const.EAPI
6085                                 from textwrap import wrap
6086                                 for line in wrap(msg, 75):
6087                                         print line
6088                         print
6089                         show_mask_docs()
6090                 else:
6091                         print "\nemerge: there are no ebuilds to satisfy "+green(xinfo)+"."
6092
6093                 # Show parent nodes and the argument that pulled them in.
6094                 traversed_nodes = set()
6095                 node = myparent
6096                 msg = []
6097                 while node is not None:
6098                         traversed_nodes.add(node)
6099                         msg.append('(dependency required by "%s" [%s])' % \
6100                                 (colorize('INFORM', str(node.cpv)), node.type_name))
6101                         # When traversing to parents, prefer arguments over packages
6102                         # since arguments are root nodes. Never traverse the same
6103                         # package twice, in order to prevent an infinite loop.
6104                         selected_parent = None
6105                         for parent in self.digraph.parent_nodes(node):
6106                                 if isinstance(parent, DependencyArg):
6107                                         msg.append('(dependency required by "%s" [argument])' % \
6108                                                 (colorize('INFORM', str(parent))))
6109                                         selected_parent = None
6110                                         break
6111                                 if parent not in traversed_nodes:
6112                                         selected_parent = parent
6113                         node = selected_parent
6114                 for line in msg:
6115                         print line
6116
6117                 print
6118
6119         def _select_pkg_highest_available(self, root, atom, onlydeps=False):
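                 """
                 Memoizing wrapper for _select_pkg_highest_available_imp(). Results
                 are cached per (root, atom, onlydeps), and a cached result is
                 refreshed if the selected package has since been added to the
                 graph. Newly selected packages that are visible (and, if
                 installed, not keyword masked) are also injected into the
                 root_config.visible_pkgs database.
                 """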
6120                 cache_key = (root, atom, onlydeps)
6121                 ret = self._highest_pkg_cache.get(cache_key)
6122                 if ret is not None:
6123                         pkg, existing = ret
6124                         if pkg and not existing:
6125                                 existing = self._slot_pkg_map[root].get(pkg.slot_atom)
6126                                 if existing and existing == pkg:
6127                                         # Update the cache to reflect that the
6128                                         # package has been added to the graph.
6129                                         ret = pkg, pkg
6130                                         self._highest_pkg_cache[cache_key] = ret
6131                         return ret
6132                 ret = self._select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps)
6133                 self._highest_pkg_cache[cache_key] = ret
6134                 pkg, existing = ret
6135                 if pkg is not None:
6136                         settings = pkg.root_config.settings
6137                         if visible(settings, pkg) and not (pkg.installed and \
6138                                 settings._getMissingKeywords(pkg.cpv, pkg.metadata)):
6139                                 pkg.root_config.visible_pkgs.cpv_inject(pkg)
6140                 return ret
6141
6142         def _select_pkg_highest_available_imp(self, root, atom, onlydeps=False):
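                 """
                 Return a (pkg, existing_node) pair for the best match of the
                 given atom, where existing_node is a matching package that is
                 already present in the graph, if any. Two passes are made over
                 the available dbs: the first prefers reuse of an existing graph
                 node, the second selects a new package while applying
                 visibility, USE and --newuse/--noreplace checks. Returns
                 (None, None) when nothing matches.
                 """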
6143                 root_config = self.roots[root]
6144                 pkgsettings = self.pkgsettings[root]
6145                 dbs = self._filtered_trees[root]["dbs"]
6146                 vardb = self.roots[root].trees["vartree"].dbapi
6147                 portdb = self.roots[root].trees["porttree"].dbapi
6148                 # List of acceptable packages, ordered by type preference.
6149                 matched_packages = []
6150                 highest_version = None
6151                 if not isinstance(atom, portage.dep.Atom):
6152                         atom = portage.dep.Atom(atom)
6153                 atom_cp = atom.cp
6154                 atom_set = InternalPackageSet(initial_atoms=(atom,))
6155                 existing_node = None
6156                 myeb = None
6157                 usepkgonly = "--usepkgonly" in self.myopts
6158                 empty = "empty" in self.myparams
6159                 selective = "selective" in self.myparams
6160                 reinstall = False
6161                 noreplace = "--noreplace" in self.myopts
6162                 # Behavior of the "selective" parameter depends on
6163                 # whether or not a package matches an argument atom.
6164                 # If an installed package provides an old-style
6165                 # virtual that is no longer provided by an available
6166                 # package, the installed package may match an argument
6167                 # atom even though none of the available packages do.
6168                 # Therefore, "selective" logic does not consider
6169                 # whether or not an installed package matches an
6170                 # argument atom. It only considers whether or not
6171                 # available packages match argument atoms, which is
6172                 # represented by the found_available_arg flag.
6173                 found_available_arg = False
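                 # Two passes: the first tries to reuse a package that already has
                 # a node in the graph for the matching slot; the second considers
                 # new packages when no suitable existing node was found.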
6174                 for find_existing_node in True, False:
6175                         if existing_node:
6176                                 break
6177                         for db, pkg_type, built, installed, db_keys in dbs:
6178                                 if existing_node:
6179                                         break
6180                                 if installed and not find_existing_node:
6181                                         want_reinstall = reinstall or empty or \
6182                                                 (found_available_arg and not selective)
6183                                         if want_reinstall and matched_packages:
6184                                                 continue
6185                                 if hasattr(db, "xmatch"):
6186                                         cpv_list = db.xmatch("match-all", atom)
6187                                 else:
6188                                         cpv_list = db.match(atom)
6189
6190                                 # USE=multislot can make an installed package appear as if
6191                                 # it doesn't satisfy a slot dependency. Rebuilding the ebuild
6192                                 # won't do any good as long as USE=multislot is enabled since
6193                                 # the newly built package still won't have the expected slot.
6194                                 # Therefore, assume that such SLOT dependencies are already
6195                                 # satisfied rather than forcing a rebuild.
6196                                 if installed and not cpv_list and atom.slot:
6197                                         for cpv in db.match(atom.cp):
6198                                                 slot_available = False
6199                                                 for other_db, other_type, other_built, \
6200                                                         other_installed, other_keys in dbs:
6201                                                         try:
6202                                                                 if atom.slot == \
6203                                                                         other_db.aux_get(cpv, ["SLOT"])[0]:
6204                                                                         slot_available = True
6205                                                                         break
6206                                                         except KeyError:
6207                                                                 pass
6208                                                 if not slot_available:
6209                                                         continue
6210                                                 inst_pkg = self._pkg(cpv, "installed",
6211                                                         root_config, installed=installed)
6212                                                 # Remove the slot from the atom and verify that
6213                                                 # the package matches the resulting atom.
6214                                                 atom_without_slot = portage.dep.remove_slot(atom)
6215                                                 if atom.use:
6216                                                         atom_without_slot += str(atom.use)
6217                                                 atom_without_slot = portage.dep.Atom(atom_without_slot)
6218                                                 if portage.match_from_list(
6219                                                         atom_without_slot, [inst_pkg]):
6220                                                         cpv_list = [inst_pkg.cpv]
6221                                                 break
6222
6223                                 if not cpv_list:
6224                                         continue
6225                                 pkg_status = "merge"
6226                                 if installed or onlydeps:
6227                                         pkg_status = "nomerge"
6228                                 # descending order
6229                                 cpv_list.reverse()
6230                                 for cpv in cpv_list:
6231                                         # Make --noreplace take precedence over --newuse.
6232                                         if not installed and noreplace and \
6233                                                 cpv in vardb.match(atom):
6234                                                 # If the installed version is masked, it may
6235                                                 # be necessary to look at lower versions,
6236                                                 # in case there is a visible downgrade.
6237                                                 continue
6238                                         reinstall_for_flags = None
6239                                         cache_key = (pkg_type, root, cpv, pkg_status)
6240                                         calculated_use = True
6241                                         pkg = self._pkg_cache.get(cache_key)
6242                                         if pkg is None:
6243                                                 calculated_use = False
6244                                                 try:
6245                                                         metadata = izip(db_keys, db.aux_get(cpv, db_keys))
6246                                                 except KeyError:
6247                                                         continue
6248                                                 pkg = Package(built=built, cpv=cpv,
6249                                                         installed=installed, metadata=metadata,
6250                                                         onlydeps=onlydeps, root_config=root_config,
6251                                                         type_name=pkg_type)
6252                                                 metadata = pkg.metadata
6253                                                 if not built:
6254                                                         metadata['CHOST'] = pkgsettings.get('CHOST', '')
6255                                                 if not built and ("?" in metadata["LICENSE"] or \
6256                                                         "?" in metadata["PROVIDE"]):
6257                                                         # This is avoided whenever possible because
6258                                                         # it's expensive. It only needs to be done here
6259                                                         # if it has an effect on visibility.
6260                                                         pkgsettings.setcpv(pkg)
6261                                                         metadata["USE"] = pkgsettings["PORTAGE_USE"]
6262                                                         calculated_use = True
6263                                                 self._pkg_cache[pkg] = pkg
6264
6265                                         if not installed or (built and matched_packages):
6266                                                 # Only enforce visibility on installed packages
6267                                                 # if there is at least one other visible package
6268                                                 # available. By filtering installed masked packages
6269                                                 # here, packages that have been masked since they
6270                                                 # were installed can be automatically downgraded
6271                                                 # to an unmasked version.
6272                                                 try:
6273                                                         if not visible(pkgsettings, pkg):
6274                                                                 continue
6275                                                 except portage.exception.InvalidDependString:
6276                                                         if not installed:
6277                                                                 continue
6278
6279                                                 # Enable upgrade or downgrade to a version
6280                                                 # with visible KEYWORDS when the installed
6281                                                 # version is masked by KEYWORDS, but never
6282                                                 # reinstall the same exact version only due
6283                                                 # to a KEYWORDS mask.
6284                                                 if built and matched_packages:
6285
6286                                                         different_version = None
6287                                                         for avail_pkg in matched_packages:
6288                                                                 if not portage.dep.cpvequal(
6289                                                                         pkg.cpv, avail_pkg.cpv):
6290                                                                         different_version = avail_pkg
6291                                                                         break
6292                                                         if different_version is not None:
6293
6294                                                                 if installed and \
6295                                                                         pkgsettings._getMissingKeywords(
6296                                                                         pkg.cpv, pkg.metadata):
6297                                                                         continue
6298
6299                                                                 # If the ebuild no longer exists or its
6300                                                                 # keywords have been dropped, reject built
6301                                                                 # instances (installed or binary).
6302                                                                 # If --usepkgonly is enabled, assume that
6303                                                                 # the ebuild status should be ignored.
6304                                                                 if not usepkgonly:
6305                                                                         try:
6306                                                                                 pkg_eb = self._pkg(
6307                                                                                         pkg.cpv, "ebuild", root_config)
6308                                                                         except portage.exception.PackageNotFound:
6309                                                                                 continue
6310                                                                         else:
6311                                                                                 if not visible(pkgsettings, pkg_eb):
6312                                                                                         continue
6313
6314                                         if not pkg.built and not calculated_use:
6315                                                 # This is avoided whenever possible because
6316                                                 # it's expensive.
6317                                                 pkgsettings.setcpv(pkg)
6318                                                 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
6319
6320                                         if pkg.cp != atom.cp:
6321                                                 # A cpv can be returned from dbapi.match() as an
6322                                                 # old-style virtual match even in cases when the
6323                                                 # package does not actually PROVIDE the virtual.
6324                                                 # Filter out any such false matches here.
6325                                                 if not atom_set.findAtomForPackage(pkg):
6326                                                         continue
6327
6328                                         myarg = None
6329                                         if root == self.target_root:
6330                                                 try:
6331                                                         # Ebuild USE must have been calculated prior
6332                                                         # to this point, in case atoms have USE deps.
6333                                                         myarg = self._iter_atoms_for_pkg(pkg).next()
6334                                                 except StopIteration:
6335                                                         pass
6336                                                 except portage.exception.InvalidDependString:
6337                                                         if not installed:
6338                                                                 # masked by corruption
6339                                                                 continue
6340                                         if not installed and myarg:
6341                                                 found_available_arg = True
6342
6343                                         if atom.use and not pkg.built:
6344                                                 use = pkg.use.enabled
6345                                                 if atom.use.enabled.difference(use):
6346                                                         continue
6347                                                 if atom.use.disabled.intersection(use):
6348                                                         continue
6349                                         if pkg.cp == atom_cp:
6350                                                 if highest_version is None:
6351                                                         highest_version = pkg
6352                                                 elif pkg > highest_version:
6353                                                         highest_version = pkg
6354                                         # At this point, we've found the highest visible
6355                                         # match from the current repo. Any lower versions
6356                                         # from this repo are ignored, so the loop
6357                                         # will always end with a break statement below
6358                                         # this point.
6359                                         if find_existing_node:
6360                                                 e_pkg = self._slot_pkg_map[root].get(pkg.slot_atom)
6361                                                 if not e_pkg:
6362                                                         break
6363                                                 if portage.dep.match_from_list(atom, [e_pkg]):
6364                                                         if highest_version and \
6365                                                                 e_pkg.cp == atom_cp and \
6366                                                                 e_pkg < highest_version and \
6367                                                                 e_pkg.slot_atom != highest_version.slot_atom:
6368                                                                 # There is a higher version available in a
6369                                                                 # different slot, so this existing node is
6370                                                                 # irrelevant.
6371                                                                 pass
6372                                                         else:
6373                                                                 matched_packages.append(e_pkg)
6374                                                                 existing_node = e_pkg
6375                                                 break
6376                                         # Compare built package to current config and
6377                                         # reject the built package if necessary.
6378                                         if built and not installed and \
6379                                                 ("--newuse" in self.myopts or \
6380                                                 "--reinstall" in self.myopts):
6381                                                 iuses = pkg.iuse.all
6382                                                 old_use = pkg.use.enabled
6383                                                 if myeb:
6384                                                         pkgsettings.setcpv(myeb)
6385                                                 else:
6386                                                         pkgsettings.setcpv(pkg)
6387                                                 now_use = pkgsettings["PORTAGE_USE"].split()
6388                                                 forced_flags = set()
6389                                                 forced_flags.update(pkgsettings.useforce)
6390                                                 forced_flags.update(pkgsettings.usemask)
6391                                                 cur_iuse = iuses
6392                                                 if myeb and not usepkgonly:
6393                                                         cur_iuse = myeb.iuse.all
6394                                                 if self._reinstall_for_flags(forced_flags,
6395                                                         old_use, iuses,
6396                                                         now_use, cur_iuse):
6397                                                         break
6398                                         # Compare current config to installed package
6399                                         # and do not reinstall if possible.
6400                                         if not installed and \
6401                                                 ("--newuse" in self.myopts or \
6402                                                 "--reinstall" in self.myopts) and \
6403                                                 cpv in vardb.match(atom):
6404                                                 pkgsettings.setcpv(pkg)
6405                                                 forced_flags = set()
6406                                                 forced_flags.update(pkgsettings.useforce)
6407                                                 forced_flags.update(pkgsettings.usemask)
6408                                                 old_use = vardb.aux_get(cpv, ["USE"])[0].split()
6409                                                 old_iuse = set(filter_iuse_defaults(
6410                                                         vardb.aux_get(cpv, ["IUSE"])[0].split()))
6411                                                 cur_use = pkg.use.enabled
6412                                                 cur_iuse = pkg.iuse.all
6413                                                 reinstall_for_flags = \
6414                                                         self._reinstall_for_flags(
6415                                                         forced_flags, old_use, old_iuse,
6416                                                         cur_use, cur_iuse)
6417                                                 if reinstall_for_flags:
6418                                                         reinstall = True
6419                                         if not built:
6420                                                 myeb = pkg
6421                                         matched_packages.append(pkg)
6422                                         if reinstall_for_flags:
6423                                                 self._reinstall_nodes[pkg] = \
6424                                                         reinstall_for_flags
6425                                         break
6426
6427                 if not matched_packages:
6428                         return None, None
6429
6430                 if "--debug" in self.myopts:
6431                         for pkg in matched_packages:
6432                                 portage.writemsg("%s %s\n" % \
6433                                         ((pkg.type_name + ":").rjust(10), pkg.cpv), noiselevel=-1)
6434
6435                 # Filter out any old-style virtual matches if they are
6436                 # mixed with new-style virtual matches.
6437                 cp = portage.dep_getkey(atom)
6438                 if len(matched_packages) > 1 and \
6439                         "virtual" == portage.catsplit(cp)[0]:
6440                         for pkg in matched_packages:
6441                                 if pkg.cp != cp:
6442                                         continue
6443                                 # Got a new-style virtual, so filter
6444                                 # out any old-style virtuals.
6445                                 matched_packages = [pkg for pkg in matched_packages \
6446                                         if pkg.cp == cp]
6447                                 break
6448
6449                 if len(matched_packages) > 1:
6450                         bestmatch = portage.best(
6451                                 [pkg.cpv for pkg in matched_packages])
6452                         matched_packages = [pkg for pkg in matched_packages \
6453                                 if portage.dep.cpvequal(pkg.cpv, bestmatch)]
6454
6455                 # ordered by type preference ("ebuild" type is the last resort)
6456                 return  matched_packages[-1], existing_node
6457
6458         def _select_pkg_from_graph(self, root, atom, onlydeps=False):
6459                 """
6460                 Select packages that have already been added to the graph or
6461                 those that are installed and have not been scheduled for
6462                 replacement.
6463                 """
6464                 graph_db = self._graph_trees[root]["porttree"].dbapi
6465                 matches = graph_db.match_pkgs(atom)
6466                 if not matches:
6467                         return None, None
6468                 pkg = matches[-1] # highest match
6469                 in_graph = self._slot_pkg_map[root].get(pkg.slot_atom)
6470                 return pkg, in_graph
6471
6472         def _complete_graph(self):
6473                 """
6474                 Add any deep dependencies of required sets (args, system, world) that
6475                 have not been pulled into the graph yet. This ensures that the graph
6476                 is consistent such that initially satisfied deep dependencies are not
6477                 broken in the new graph. Initially unsatisfied dependencies are
6478                 irrelevant since we only want to avoid breaking dependencies that are
6479                 initially satisfied.
6480
6481                 Since this method can consume enough time to disturb users, it is
6482                 currently only enabled by the --complete-graph option.
6483                 """
6484                 if "--buildpkgonly" in self.myopts or \
6485                         "recurse" not in self.myparams:
6486                         return 1
6487
6488                 if "complete" not in self.myparams:
6489                         # Skip this to avoid consuming enough time to disturb users.
6490                         return 1
6491
6492                 # Put the depgraph into a mode that causes it to only
6493                 # select packages that have already been added to the
6494                 # graph or those that are installed and have not been
6495                 # scheduled for replacement. Also, toggle the "deep"
6496                 # parameter so that all dependencies are traversed and
6497                 # accounted for.
6498                 self._select_atoms = self._select_atoms_from_graph
6499                 self._select_package = self._select_pkg_from_graph
6500                 already_deep = "deep" in self.myparams
6501                 if not already_deep:
6502                         self.myparams.add("deep")
6503
6504                 for root in self.roots:
6505                         required_set_names = self._required_set_names.copy()
6506                         if root == self.target_root and \
6507                                 (already_deep or "empty" in self.myparams):
6508                                 required_set_names.difference_update(self._sets)
6509                         if not required_set_names and not self._ignored_deps:
6510                                 continue
6511                         root_config = self.roots[root]
6512                         setconfig = root_config.setconfig
6513                         args = []
6514                         # Reuse existing SetArg instances when available.
6515                         for arg in self.digraph.root_nodes():
6516                                 if not isinstance(arg, SetArg):
6517                                         continue
6518                                 if arg.root_config != root_config:
6519                                         continue
6520                                 if arg.name in required_set_names:
6521                                         args.append(arg)
6522                                         required_set_names.remove(arg.name)
6523                         # Create new SetArg instances only when necessary.
6524                         for s in required_set_names:
6525                                 expanded_set = InternalPackageSet(
6526                                         initial_atoms=setconfig.getSetAtoms(s))
6527                                 atom = SETPREFIX + s
6528                                 args.append(SetArg(arg=atom, set=expanded_set,
6529                                         root_config=root_config))
6530                         vardb = root_config.trees["vartree"].dbapi
6531                         for arg in args:
6532                                 for atom in arg.set:
6533                                         self._dep_stack.append(
6534                                                 Dependency(atom=atom, root=root, parent=arg))
6535                         if self._ignored_deps:
6536                                 self._dep_stack.extend(self._ignored_deps)
6537                                 self._ignored_deps = []
6538                         if not self._create_graph(allow_unsatisfied=True):
6539                                 return 0
6540                         # Check the unsatisfied deps to see if any initially satisfied deps
6541                         # will become unsatisfied due to an upgrade. Initially unsatisfied
6542                         # deps are irrelevant since we only want to avoid breaking deps
6543                         # that are initially satisfied.
6544                         while self._unsatisfied_deps:
6545                                 dep = self._unsatisfied_deps.pop()
6546                                 matches = vardb.match_pkgs(dep.atom)
6547                                 if not matches:
6548                                         self._initially_unsatisfied_deps.append(dep)
6549                                         continue
6550                                 # A scheduled installation broke a deep dependency.
6551                                 # Add the installed package to the graph so that it
6552                                 # will be appropriately reported as a slot collision
6553                                 # (possibly solvable via backtracking).
6554                                 pkg = matches[-1] # highest match
6555                                 if not self._add_pkg(pkg, dep):
6556                                         return 0
6557                                 if not self._create_graph(allow_unsatisfied=True):
6558                                         return 0
6559                 return 1
6560
6561         def _pkg(self, cpv, type_name, root_config, installed=False):
6562                 """
6563                 Get a package instance from the cache, or create a new
6564                 one if necessary. Raises PackageNotFound if aux_get
6565                 fails for some reason (package does not exist or is
6566                 corrupt).
6567                 """
6568                 operation = "merge"
6569                 if installed:
6570                         operation = "nomerge"
6571                 pkg = self._pkg_cache.get(
6572                         (type_name, root_config.root, cpv, operation))
6573                 if pkg is None:
6574                         tree_type = self.pkg_tree_map[type_name]
6575                         db = root_config.trees[tree_type].dbapi
6576                         db_keys = list(self._trees_orig[root_config.root][
6577                                 tree_type].dbapi._aux_cache_keys)
6578                         try:
6579                                 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
6580                         except KeyError:
6581                                 raise portage.exception.PackageNotFound(cpv)
6582                         pkg = Package(cpv=cpv, metadata=metadata,
6583                                 root_config=root_config, installed=installed)
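                             # For ebuilds, USE is not taken from the metadata cache;
                             # compute it (and CHOST) from the current configuration.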
6584                         if type_name == "ebuild":
6585                                 settings = self.pkgsettings[root_config.root]
6586                                 settings.setcpv(pkg)
6587                                 pkg.metadata["USE"] = settings["PORTAGE_USE"]
6588                                 pkg.metadata['CHOST'] = settings.get('CHOST', '')
6589                         self._pkg_cache[pkg] = pkg
6590                 return pkg
6591
6592         def validate_blockers(self):
6593                 """Remove any blockers from the digraph that do not match any of the
6594                 packages within the graph.  If necessary, create hard deps to ensure
6595                 correct merge order such that mutually blocking packages are never
6596                 installed simultaneously."""
6597
6598                 if "--buildpkgonly" in self.myopts or \
6599                         "--nodeps" in self.myopts:
6600                         return True
6601
6602                 #if "deep" in self.myparams:
6603                 if True:
6604                         # Pull in blockers from all installed packages that haven't already
6605                         # been pulled into the depgraph. This incurs a performance
6606                         # penalty due to all of the additional dep_check calls that
6607                         # are required.
6608
6609                         dep_keys = ["DEPEND","RDEPEND","PDEPEND"]
6610                         for myroot in self.trees:
6611                                 vardb = self.trees[myroot]["vartree"].dbapi
6612                                 portdb = self.trees[myroot]["porttree"].dbapi
6613                                 pkgsettings = self.pkgsettings[myroot]
6614                                 final_db = self.mydbapi[myroot]
6615
6616                                 blocker_cache = BlockerCache(myroot, vardb)
6617                                 stale_cache = set(blocker_cache)
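                                     # Entries left in stale_cache after the loop below refer to
                                     # packages that are no longer installed, so they are discarded
                                     # from the blocker cache afterwards.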
6618                                 for pkg in vardb:
6619                                         cpv = pkg.cpv
6620                                         stale_cache.discard(cpv)
6621                                         pkg_in_graph = self.digraph.contains(pkg)
6622
6623                                         # Check for masked installed packages. Only warn about
6624                                         # packages that are in the graph in order to avoid warning
6625                                         # about those that will be automatically uninstalled during
6626                                         # the merge process or by --depclean.
6627                                         if pkg in final_db:
6628                                                 if pkg_in_graph and not visible(pkgsettings, pkg):
6629                                                         self._masked_installed.add(pkg)
6630
6631                                         blocker_atoms = None
6632                                         blockers = None
6633                                         if pkg_in_graph:
6634                                                 blockers = []
6635                                                 try:
6636                                                         blockers.extend(
6637                                                                 self._blocker_parents.child_nodes(pkg))
6638                                                 except KeyError:
6639                                                         pass
6640                                                 try:
6641                                                         blockers.extend(
6642                                                                 self._irrelevant_blockers.child_nodes(pkg))
6643                                                 except KeyError:
6644                                                         pass
6645                                         if blockers is not None:
6646                                                 blockers = set(str(blocker.atom) \
6647                                                         for blocker in blockers)
6648
6649                                         # If this node has any blockers, create a "nomerge"
6650                                         # node for it so that they can be enforced.
6651                                         self.spinner.update()
6652                                         blocker_data = blocker_cache.get(cpv)
6653                                         if blocker_data is not None and \
6654                                                 blocker_data.counter != long(pkg.metadata["COUNTER"]):
6655                                                 blocker_data = None
6656
6657                                         # If blocker data from the graph is available, use
6658                                         # it to validate the cache and update the cache if
6659                                         # it seems invalid.
6660                                         if blocker_data is not None and \
6661                                                 blockers is not None:
6662                                                 if not blockers.symmetric_difference(
6663                                                         blocker_data.atoms):
6664                                                         continue
6665                                                 blocker_data = None
6666
6667                                         if blocker_data is None and \
6668                                                 blockers is not None:
6669                                                 # Re-use the blockers from the graph.
6670                                                 blocker_atoms = sorted(blockers)
6671                                                 counter = long(pkg.metadata["COUNTER"])
6672                                                 blocker_data = \
6673                                                         blocker_cache.BlockerData(counter, blocker_atoms)
6674                                                 blocker_cache[pkg.cpv] = blocker_data
6675                                                 continue
6676
6677                                         if blocker_data:
6678                                                 blocker_atoms = blocker_data.atoms
6679                                         else:
6680                                                 # Use aux_get() to trigger FakeVartree global
6681                                                 # updates on *DEPEND when appropriate.
6682                                                 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
6683                                                 # It is crucial to pass in final_db here in order to
6684                                                 # optimize dep_check calls by eliminating atoms via
6685                                                 # dep_wordreduce and dep_eval calls.
6686                                                 try:
6687                                                         portage.dep._dep_check_strict = False
6688                                                         try:
6689                                                                 success, atoms = portage.dep_check(depstr,
6690                                                                         final_db, pkgsettings, myuse=pkg.use.enabled,
6691                                                                         trees=self._graph_trees, myroot=myroot)
6692                                                         except Exception, e:
6693                                                                 if isinstance(e, SystemExit):
6694                                                                         raise
6695                                                                 # This is helpful, for example, if a ValueError
6696                                                                 # is thrown from cpv_expand due to multiple
6697                                                                 # matches (this can happen if an atom lacks a
6698                                                                 # category).
6699                                                                 show_invalid_depstring_notice(
6700                                                                         pkg, depstr, str(e))
6701                                                                 del e
6702                                                                 raise
6703                                                 finally:
6704                                                         portage.dep._dep_check_strict = True
6705                                                 if not success:
6706                                                         replacement_pkg = final_db.match_pkgs(pkg.slot_atom)
6707                                                         if replacement_pkg and \
6708                                                                 replacement_pkg[0].operation == "merge":
6709                                                                 # This package is being replaced anyway, so
6710                                                                 # ignore invalid dependencies so as not to
6711                                                                 # annoy the user too much (otherwise they'd be
6712                                                                 # forced to manually unmerge it first).
6713                                                                 continue
6714                                                         show_invalid_depstring_notice(pkg, depstr, atoms)
6715                                                         return False
6716                                                 blocker_atoms = [myatom for myatom in atoms \
6717                                                         if myatom.startswith("!")]
6718                                                 blocker_atoms.sort()
6719                                                 counter = long(pkg.metadata["COUNTER"])
6720                                                 blocker_cache[cpv] = \
6721                                                         blocker_cache.BlockerData(counter, blocker_atoms)
6722                                         if blocker_atoms:
6723                                                 try:
6724                                                         for atom in blocker_atoms:
6725                                                                 blocker = Blocker(atom=portage.dep.Atom(atom),
6726                                                                         eapi=pkg.metadata["EAPI"], root=myroot)
6727                                                                 self._blocker_parents.add(blocker, pkg)
6728                                                 except portage.exception.InvalidAtom, e:
6729                                                         depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
6730                                                         show_invalid_depstring_notice(
6731                                                                 pkg, depstr, "Invalid Atom: %s" % (e,))
6732                                                         return False
6733                                 for cpv in stale_cache:
6734                                         del blocker_cache[cpv]
6735                                 blocker_cache.flush()
6736                                 del blocker_cache
6737
6738                 # Discard any "uninstall" tasks scheduled by previous calls
6739                 # to this method, since those tasks may not make sense given
6740                 # the current graph state.
6741                 previous_uninstall_tasks = self._blocker_uninstalls.leaf_nodes()
6742                 if previous_uninstall_tasks:
6743                         self._blocker_uninstalls = digraph()
6744                         self.digraph.difference_update(previous_uninstall_tasks)
6745
6746                 for blocker in self._blocker_parents.leaf_nodes():
6747                         self.spinner.update()
6748                         root_config = self.roots[blocker.root]
6749                         virtuals = root_config.settings.getvirtuals()
6750                         myroot = blocker.root
6751                         initial_db = self.trees[myroot]["vartree"].dbapi
6752                         final_db = self.mydbapi[myroot]
6753                         
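                             # When the blocker targets an old-style virtual that has no
                             # new-style provider in the graph, expand the atom to cover
                             # each provider listed in the virtuals mapping.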
6754                         provider_virtual = False
6755                         if blocker.cp in virtuals and \
6756                                 not self._have_new_virt(blocker.root, blocker.cp):
6757                                 provider_virtual = True
6758
6759                         # Use this to check PROVIDE for each matched package
6760                         # when necessary.
6761                         atom_set = InternalPackageSet(
6762                                 initial_atoms=[blocker.atom])
6763
6764                         if provider_virtual:
6765                                 atoms = []
6766                                 for provider_entry in virtuals[blocker.cp]:
6767                                         provider_cp = \
6768                                                 portage.dep_getkey(provider_entry)
6769                                         atoms.append(blocker.atom.replace(
6770                                                 blocker.cp, provider_cp))
6771                         else:
6772                                 atoms = [blocker.atom]
6773
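                             # Find the packages matched by this blocker in the currently
                             # installed set (initial_db) and in the final, post-merge set.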
6774                         blocked_initial = set()
6775                         for atom in atoms:
6776                                 for pkg in initial_db.match_pkgs(atom):
6777                                         if atom_set.findAtomForPackage(pkg):
6778                                                 blocked_initial.add(pkg)
6779
6780                         blocked_final = set()
6781                         for atom in atoms:
6782                                 for pkg in final_db.match_pkgs(atom):
6783                                         if atom_set.findAtomForPackage(pkg):
6784                                                 blocked_final.add(pkg)
6785
6786                         if not blocked_initial and not blocked_final:
6787                                 parent_pkgs = self._blocker_parents.parent_nodes(blocker)
6788                                 self._blocker_parents.remove(blocker)
6789                                 # Discard any parents that don't have any more blockers.
6790                                 for pkg in parent_pkgs:
6791                                         self._irrelevant_blockers.add(blocker, pkg)
6792                                         if not self._blocker_parents.child_nodes(pkg):
6793                                                 self._blocker_parents.remove(pkg)
6794                                 continue
6795                         for parent in self._blocker_parents.parent_nodes(blocker):
6796                                 unresolved_blocks = False
6797                                 depends_on_order = set()
6798                                 for pkg in blocked_initial:
6799                                         if pkg.slot_atom == parent.slot_atom:
6800                                                 # TODO: Support blocks within slots in cases where it
6801                                                 # might make sense.  For example, a new version might
6802                                                 # require that the old version be uninstalled at build
6803                                                 # time.
6804                                                 continue
6805                                         if parent.installed:
6806                                                 # Two currently installed packages conflict with
6807                                                 # each other. Ignore this case since the damage
6808                                                 # is already done and this would be likely to
6809                                                 # confuse users if displayed like a normal blocker.
6810                                                 continue
6811
6812                                         self._blocked_pkgs.add(pkg, blocker)
6813
6814                                         if parent.operation == "merge":
6815                                                 # Maybe the blocked package can be replaced or simply
6816                                                 # unmerged to resolve this block.
6817                                                 depends_on_order.add((pkg, parent))
6818                                                 continue
6819                                         # None of the above blocker resolution techniques apply,
6820                                         # so apparently this one is unresolvable.
6821                                         unresolved_blocks = True
6822                                 for pkg in blocked_final:
6823                                         if pkg.slot_atom == parent.slot_atom:
6824                                                 # TODO: Support blocks within slots.
6825                                                 continue
6826                                         if parent.operation == "nomerge" and \
6827                                                 pkg.operation == "nomerge":
6828                                                 # This blocker will be handled the next time that a
6829                                                 # merge of either package is triggered.
6830                                                 continue
6831
6832                                         self._blocked_pkgs.add(pkg, blocker)
6833
6834                                         # Maybe the blocking package can be
6835                                         # unmerged to resolve this block.
6836                                         if parent.operation == "merge" and pkg.installed:
6837                                                 depends_on_order.add((pkg, parent))
6838                                                 continue
6839                                         elif parent.operation == "nomerge":
6840                                                 depends_on_order.add((parent, pkg))
6841                                                 continue
6842                                         # None of the above blocker resolution techniques apply,
6843                                         # so apparently this one is unresolvable.
6844                                         unresolved_blocks = True
6845
6846                                 # Make sure we don't unmerge any packages that have been pulled
6847                                 # into the graph.
6848                                 if not unresolved_blocks and depends_on_order:
6849                                         for inst_pkg, inst_task in depends_on_order:
6850                                                 if self.digraph.contains(inst_pkg) and \
6851                                                         self.digraph.parent_nodes(inst_pkg):
6852                                                         unresolved_blocks = True
6853                                                         break
6854
6855                                 if not unresolved_blocks and depends_on_order:
6856                                         for inst_pkg, inst_task in depends_on_order:
6857                                                 uninst_task = Package(built=inst_pkg.built,
6858                                                         cpv=inst_pkg.cpv, installed=inst_pkg.installed,
6859                                                         metadata=inst_pkg.metadata,
6860                                                         operation="uninstall",
6861                                                         root_config=inst_pkg.root_config,
6862                                                         type_name=inst_pkg.type_name)
6863                                                 self._pkg_cache[uninst_task] = uninst_task
6864                                                 # Enforce correct merge order with a hard dep.
6865                                                 self.digraph.addnode(uninst_task, inst_task,
6866                                                         priority=BlockerDepPriority.instance)
6867                                                 # Count references to this blocker so that it can be
6868                                                 # invalidated after nodes referencing it have been
6869                                                 # merged.
6870                                                 self._blocker_uninstalls.addnode(uninst_task, blocker)
6871                                 if not unresolved_blocks and not depends_on_order:
6872                                         self._irrelevant_blockers.add(blocker, parent)
6873                                         self._blocker_parents.remove_edge(blocker, parent)
6874                                         if not self._blocker_parents.parent_nodes(blocker):
6875                                                 self._blocker_parents.remove(blocker)
6876                                         if not self._blocker_parents.child_nodes(parent):
6877                                                 self._blocker_parents.remove(parent)
6878                                 if unresolved_blocks:
6879                                         self._unsolvable_blockers.add(blocker, parent)
6880
6881                 return True
6882
6883         def _accept_blocker_conflicts(self):
6884                 acceptable = False
6885                 for x in ("--buildpkgonly", "--fetchonly",
6886                         "--fetch-all-uri", "--nodeps"):
6887                         if x in self.myopts:
6888                                 acceptable = True
6889                                 break
6890                 return acceptable
6891
6892         def _merge_order_bias(self, mygraph):
6893                 """
6894                 For optimal leaf node selection, promote deep system runtime deps and
6895                 order nodes from highest to lowest overall reference count.
6896                 """
6897
6898                 node_info = {}
6899                 for node in mygraph.order:
6900                         node_info[node] = len(mygraph.parent_nodes(node))
6901                 deep_system_deps = _find_deep_system_runtime_deps(mygraph)
6902
6903                 def cmp_merge_preference(node1, node2):
6904
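                             # Sort uninstall operations last, deep system runtime deps first,
                             # and otherwise prefer nodes with higher reference counts.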
6905                         if node1.operation == 'uninstall':
6906                                 if node2.operation == 'uninstall':
6907                                         return 0
6908                                 return 1
6909
6910                         if node2.operation == 'uninstall':
6911                                 if node1.operation == 'uninstall':
6912                                         return 0
6913                                 return -1
6914
6915                         node1_sys = node1 in deep_system_deps
6916                         node2_sys = node2 in deep_system_deps
6917                         if node1_sys != node2_sys:
6918                                 if node1_sys:
6919                                         return -1
6920                                 return 1
6921
6922                         return node_info[node2] - node_info[node1]
6923
6924                 mygraph.order.sort(key=cmp_sort_key(cmp_merge_preference))
6925
6926         def altlist(self, reversed=False):
6927
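                     # _serialize_tasks() may raise _serialize_tasks_retry to request
                     # another pass, so keep retrying until a task list is produced
                     # and cached.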
6928                 while self._serialized_tasks_cache is None:
6929                         self._resolve_conflicts()
6930                         try:
6931                                 self._serialized_tasks_cache, self._scheduler_graph = \
6932                                         self._serialize_tasks()
6933                         except self._serialize_tasks_retry:
6934                                 pass
6935
6936                 retlist = self._serialized_tasks_cache[:]
6937                 if reversed:
6938                         retlist.reverse()
6939                 return retlist
6940
6941         def schedulerGraph(self):
6942                 """
6943                 The scheduler graph is identical to the normal one except that
6944                 uninstall edges are reversed in specific cases that require
6945                 conflicting packages to be temporarily installed simultaneously.
6946                 This is intended for use by the Scheduler in its parallelization
6947                 logic. It ensures that temporary simultaneous installation of
6948                 conflicting packages is avoided when appropriate (especially for
6949                 !!atom blockers), but allowed in specific cases that require it.
6950
6951                 Note that this method calls break_refs() which alters the state of
6952                 internal Package instances such that this depgraph instance should
6953                 not be used to perform any more calculations.
6954                 """
6955                 if self._scheduler_graph is None:
6956                         self.altlist()
6957                 self.break_refs(self._scheduler_graph.order)
6958                 return self._scheduler_graph
6959
6960         def break_refs(self, nodes):
6961                 """
6962                 Take a mergelist like that returned from self.altlist() and
6963                 break any references that lead back to the depgraph. This is
6964                 useful if you want to hold references to packages without
6965                 also holding the depgraph on the heap.
6966                 """
6967                 for node in nodes:
6968                         if hasattr(node, "root_config"):
6969                                 # The FakeVartree references the _package_cache which
6970                                 # references the depgraph. So that Package instances don't
6971                                 # hold the depgraph and FakeVartree on the heap, replace
6972                                 # the RootConfig that references the FakeVartree with the
6973                                 # original RootConfig instance which references the actual
6974                                 # vartree.
6975                                 node.root_config = \
6976                                         self._trees_orig[node.root_config.root]["root_config"]
6977
6978         def _resolve_conflicts(self):
6979                 if not self._complete_graph():
6980                         raise self._unknown_internal_error()
6981
6982                 if not self.validate_blockers():
6983                         raise self._unknown_internal_error()
6984
6985                 if self._slot_collision_info:
6986                         self._process_slot_conflicts()
6987
6988         def _serialize_tasks(self):
6989
6990                 if "--debug" in self.myopts:
6991                         writemsg("\ndigraph:\n\n", noiselevel=-1)
6992                         self.digraph.debug_print()
6993                         writemsg("\n", noiselevel=-1)
6994
6995                 scheduler_graph = self.digraph.copy()
6996                 mygraph=self.digraph.copy()
6997                 # Prune "nomerge" root nodes if nothing depends on them, since
6998                 # otherwise they slow down merge order calculation. Don't remove
6999                 # non-root nodes since they help optimize merge order in some cases
7000                 # such as revdep-rebuild.
7001                 removed_nodes = set()
7002                 while True:
7003                         for node in mygraph.root_nodes():
7004                                 if not isinstance(node, Package) or \
7005                                         node.installed or node.onlydeps:
7006                                         removed_nodes.add(node)
7007                         if removed_nodes:
7008                                 self.spinner.update()
7009                                 mygraph.difference_update(removed_nodes)
7010                         if not removed_nodes:
7011                                 break
7012                         removed_nodes.clear()
7013                 self._merge_order_bias(mygraph)
7014                 def cmp_circular_bias(n1, n2):
7015                         """
7016                         RDEPEND is stronger than PDEPEND and this function
7017                         measures such a strength bias within a circular
7018                         dependency relationship.
7019                         """
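                             # If n1 depends on n2 through a medium-or-stronger edge (but
                             # not vice versa), sort n1 after n2 so that n2 is merged first.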
7020                         n1_n2_medium = n2 in mygraph.child_nodes(n1,
7021                                 ignore_priority=priority_range.ignore_medium_soft)
7022                         n2_n1_medium = n1 in mygraph.child_nodes(n2,
7023                                 ignore_priority=priority_range.ignore_medium_soft)
7024                         if n1_n2_medium == n2_n1_medium:
7025                                 return 0
7026                         elif n1_n2_medium:
7027                                 return 1
7028                         return -1
7029                 myblocker_uninstalls = self._blocker_uninstalls.copy()
7030                 retlist=[]
7031                 # Contains uninstall tasks that have been scheduled to
7032                 # occur after overlapping blockers have been installed.
7033                 scheduled_uninstalls = set()
7034                 # Contains any Uninstall tasks that have been ignored
7035                 # in order to avoid the circular deps code path. These
7036                 # correspond to blocker conflicts that could not be
7037                 # resolved.
7038                 ignored_uninstall_tasks = set()
7039                 have_uninstall_task = False
7040                 complete = "complete" in self.myparams
7041                 asap_nodes = []
7042
7043                 def get_nodes(**kwargs):
7044                         """
7045                         Returns leaf nodes excluding Uninstall instances
7046                         since those should be executed as late as possible.
7047                         """
7048                         return [node for node in mygraph.leaf_nodes(**kwargs) \
7049                                 if isinstance(node, Package) and \
7050                                         (node.operation != "uninstall" or \
7051                                         node in scheduled_uninstalls)]
7052
7053                 # sys-apps/portage needs special treatment if ROOT="/"
7054                 running_root = self._running_root.root
7055                 from portage.const import PORTAGE_PACKAGE_ATOM
7056                 runtime_deps = InternalPackageSet(
7057                         initial_atoms=[PORTAGE_PACKAGE_ATOM])
7058                 running_portage = self.trees[running_root]["vartree"].dbapi.match_pkgs(
7059                         PORTAGE_PACKAGE_ATOM)
7060                 replacement_portage = self.mydbapi[running_root].match_pkgs(
7061                         PORTAGE_PACKAGE_ATOM)
7062
7063                 if running_portage:
7064                         running_portage = running_portage[0]
7065                 else:
7066                         running_portage = None
7067
7068                 if replacement_portage:
7069                         replacement_portage = replacement_portage[0]
7070                 else:
7071                         replacement_portage = None
7072
7073                 if replacement_portage == running_portage:
7074                         replacement_portage = None
7075
7076                 if replacement_portage is not None:
7077                         # update from running_portage to replacement_portage asap
7078                         asap_nodes.append(replacement_portage)
7079
7080                 if running_portage is not None:
7081                         try:
7082                                 portage_rdepend = self._select_atoms_highest_available(
7083                                         running_root, running_portage.metadata["RDEPEND"],
7084                                         myuse=running_portage.use.enabled,
7085                                         parent=running_portage, strict=False)
7086                         except portage.exception.InvalidDependString, e:
7087                                 portage.writemsg("!!! Invalid RDEPEND in " + \
7088                                         "'%svar/db/pkg/%s/RDEPEND': %s\n" % \
7089                                         (running_root, running_portage.cpv, e), noiselevel=-1)
7090                                 del e
7091                                 portage_rdepend = []
7092                         runtime_deps.update(atom for atom in portage_rdepend \
7093                                 if not atom.startswith("!"))
7094
7095                 def gather_deps(ignore_priority, mergeable_nodes,
7096                         selected_nodes, node):
7097                         """
7098                         Recursively gather a group of nodes that RDEPEND on
7099                         eachother. This ensures that they are merged as a group
7100                         each other. This ensures that they are merged as a group
7101                         """
7102                         if node in selected_nodes:
7103                                 return True
7104                         if node not in mergeable_nodes:
7105                                 return False
7106                         if node == replacement_portage and \
7107                                 mygraph.child_nodes(node,
7108                                 ignore_priority=priority_range.ignore_medium_soft):
7109                                 # Make sure that portage always has all of its
7110                                 # RDEPENDs installed first.
7111                                 return False
7112                         selected_nodes.add(node)
7113                         for child in mygraph.child_nodes(node,
7114                                 ignore_priority=ignore_priority):
7115                                 if not gather_deps(ignore_priority,
7116                                         mergeable_nodes, selected_nodes, child):
7117                                         return False
7118                         return True
7119
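                     # Variants of the priority_range filters that additionally ignore
                     # the hard deps used to order uninstall tasks relative to the
                     # packages that block them.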
7120                 def ignore_uninst_or_med(priority):
7121                         if priority is BlockerDepPriority.instance:
7122                                 return True
7123                         return priority_range.ignore_medium(priority)
7124
7125                 def ignore_uninst_or_med_soft(priority):
7126                         if priority is BlockerDepPriority.instance:
7127                                 return True
7128                         return priority_range.ignore_medium_soft(priority)
7129
7130                 tree_mode = "--tree" in self.myopts
7131                 # Tracks whether or not the current iteration should prefer asap_nodes
7132                 # if available.  This is set to False when the previous iteration
7133                 # failed to select any nodes.  It is reset whenever nodes are
7134                 # successfully selected.
7135                 prefer_asap = True
7136
7137                 # Controls whether or not the current iteration should drop edges that
7138                 # are "satisfied" by installed packages, in order to solve circular
7139                 # dependencies. The deep runtime dependencies of installed packages are
7140                 # not checked in this case (bug #199856), so it must be avoided
7141                 # whenever possible.
7142                 drop_satisfied = False
7143
7144                 # State of variables for successive iterations that loosen the
7145                 # criteria for node selection.
7146                 #
7147                 # iteration   prefer_asap   drop_satisfied
7148                 # 1           True          False
7149                 # 2           False         False
7150                 # 3           False         True
7151                 #
7152                 # If no nodes are selected on the last iteration, it is due to
7153                 # unresolved blockers or circular dependencies.
7154
7155                 while not mygraph.empty():
7156                         self.spinner.update()
7157                         selected_nodes = None
7158                         ignore_priority = None
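                             # The "satisfied" range also allows edges that are already
                             # satisfied by installed packages to be ignored (see the
                             # drop_satisfied notes above).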
7159                         if drop_satisfied or (prefer_asap and asap_nodes):
7160                                 priority_range = DepPrioritySatisfiedRange
7161                         else:
7162                                 priority_range = DepPriorityNormalRange
7163                         if prefer_asap and asap_nodes:
7164                                 # ASAP nodes are merged before their soft deps. Go ahead and
7165                                 # select root nodes here if necessary, since it's typical for
7166                                 # the parent to have been removed from the graph already.
7167                                 asap_nodes = [node for node in asap_nodes \
7168                                         if mygraph.contains(node)]
7169                                 for node in asap_nodes:
7170                                         if not mygraph.child_nodes(node,
7171                                                 ignore_priority=priority_range.ignore_soft):
7172                                                 selected_nodes = [node]
7173                                                 asap_nodes.remove(node)
7174                                                 break
7175                         if not selected_nodes and \
7176                                 not (prefer_asap and asap_nodes):
7177                                 for i in xrange(priority_range.NONE,
7178                                         priority_range.MEDIUM_SOFT + 1):
7179                                         ignore_priority = priority_range.ignore_priority[i]
7180                                         nodes = get_nodes(ignore_priority=ignore_priority)
7181                                         if nodes:
7182                                                 # If there is a mix of uninstall nodes with other
7183                                                 # types, save the uninstall nodes for later since
7184                                                 # sometimes a merge node will render an uninstall
7185                                                 # node unnecessary (due to occupying the same slot),
7186                                                 # and we want to avoid executing a separate uninstall
7187                                                 # task in that case.
7188                                                 if len(nodes) > 1:
7189                                                         good_uninstalls = []
7190                                                         with_some_uninstalls_excluded = []
7191                                                         for node in nodes:
7192                                                                 if node.operation == "uninstall":
7193                                                                         slot_node = self.mydbapi[node.root
7194                                                                                 ].match_pkgs(node.slot_atom)
7195                                                                         if slot_node and \
7196                                                                                 slot_node[0].operation == "merge":
7197                                                                                 continue
7198                                                                         good_uninstalls.append(node)
7199                                                                 with_some_uninstalls_excluded.append(node)
7200                                                         if good_uninstalls:
7201                                                                 nodes = good_uninstalls
7202                                                         elif with_some_uninstalls_excluded:
7203                                                                 nodes = with_some_uninstalls_excluded
7204                                                         else:
7205                                                                 nodes = nodes
7206
7207                                                 if ignore_priority is None and not tree_mode:
7208                                                         # Greedily pop all of these nodes since no
7209                                                         # relationship has been ignored. This optimization
7210                                                         # destroys --tree output, so it's disabled in tree
7211                                                         # mode.
7212                                                         selected_nodes = nodes
7213                                                 else:
7214                                                         # For optimal merge order:
7215                                                         #  * Only pop one node.
7216                                                         #  * Removing a root node (node without a parent)
7217                                                         #    will not produce a leaf node, so avoid it.
7218                                                         #  * It's normal for a selected uninstall to be a
7219                                                         #    root node, so don't check them for parents.
7220                                                         for node in nodes:
7221                                                                 if node.operation == "uninstall" or \
7222                                                                         mygraph.parent_nodes(node):
7223                                                                         selected_nodes = [node]
7224                                                                         break
7225
7226                                                 if selected_nodes:
7227                                                         break
7228
7229                         if not selected_nodes:
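                                     # No node could be selected individually, so try to gather
                                     # a group of mergeable nodes that depend on each other and
                                     # merge them as a unit (see gather_deps above).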
7230                                 nodes = get_nodes(ignore_priority=priority_range.ignore_medium)
7231                                 if nodes:
7232                                         mergeable_nodes = set(nodes)
7233                                         if prefer_asap and asap_nodes:
7234                                                 nodes = asap_nodes
7235                                         for i in xrange(priority_range.SOFT,
7236                                                 priority_range.MEDIUM_SOFT + 1):
7237                                                 ignore_priority = priority_range.ignore_priority[i]
7238                                                 for node in nodes:
7239                                                         if not mygraph.parent_nodes(node):
7240                                                                 continue
7241                                                         selected_nodes = set()
7242                                                         if gather_deps(ignore_priority,
7243                                                                 mergeable_nodes, selected_nodes, node):
7244                                                                 break
7245                                                         else:
7246                                                                 selected_nodes = None
7247                                                 if selected_nodes:
7248                                                         break
7249
7250                                         if prefer_asap and asap_nodes and not selected_nodes:
7251                                                 # We failed to find any asap nodes to merge, so ignore
7252                                                 # them for the next iteration.
7253                                                 prefer_asap = False
7254                                                 continue
7255
7256                         if selected_nodes and ignore_priority is not None:
7257                                 # Try to merge ignored medium_soft deps as soon as possible
7258                                 # if they're not satisfied by installed packages.
7259                                 for node in selected_nodes:
7260                                         children = set(mygraph.child_nodes(node))
7261                                         soft = children.difference(
7262                                                 mygraph.child_nodes(node,
7263                                                 ignore_priority=DepPrioritySatisfiedRange.ignore_soft))
7264                                         medium_soft = children.difference(
7265                                                 mygraph.child_nodes(node,
7266                                                         ignore_priority = \
7267                                                         DepPrioritySatisfiedRange.ignore_medium_soft))
7268                                         medium_soft.difference_update(soft)
7269                                         for child in medium_soft:
7270                                                 if child in selected_nodes:
7271                                                         continue
7272                                                 if child in asap_nodes:
7273                                                         continue
7274                                                 asap_nodes.append(child)
7275
7276                         if selected_nodes and len(selected_nodes) > 1:
7277                                 if not isinstance(selected_nodes, list):
7278                                         selected_nodes = list(selected_nodes)
7279                                 selected_nodes.sort(key=cmp_sort_key(cmp_circular_bias))
7280
7281                         if not selected_nodes and not myblocker_uninstalls.is_empty():
7282                                 # An Uninstall task needs to be executed in order to
7283                                 # avoid a conflict if possible.
7284
7285                                 if drop_satisfied:
7286                                         priority_range = DepPrioritySatisfiedRange
7287                                 else:
7288                                         priority_range = DepPriorityNormalRange
7289
7290                                 mergeable_nodes = get_nodes(
7291                                         ignore_priority=ignore_uninst_or_med)
7292
7293                                 min_parent_deps = None
7294                                 uninst_task = None
7295                                 for task in myblocker_uninstalls.leaf_nodes():
7296                                         # Do some sanity checks so that system or world packages
7297                                         # don't get uninstalled inappropriately here (only really
7298                                         # necessary when --complete-graph has not been enabled).
7299
7300                                         if task in ignored_uninstall_tasks:
7301                                                 continue
7302
7303                                         if task in scheduled_uninstalls:
7304                                                 # It's been scheduled but it hasn't
7305                                                 # been executed yet due to dependence
7306                                                 # on installation of blocking packages.
7307                                                 continue
7308
7309                                         root_config = self.roots[task.root]
7310                                         inst_pkg = self._pkg_cache[
7311                                                 ("installed", task.root, task.cpv, "nomerge")]
7312
7313                                         if self.digraph.contains(inst_pkg):
7314                                                 continue
7315
7316                                         forbid_overlap = False
7317                                         heuristic_overlap = False
7318                                         for blocker in myblocker_uninstalls.parent_nodes(task):
7319                                                 if blocker.eapi in ("0", "1"):
7320                                                         heuristic_overlap = True
7321                                                 elif blocker.atom.blocker.overlap.forbid:
7322                                                         forbid_overlap = True
7323                                                         break
7324                                         if forbid_overlap and running_root == task.root:
7325                                                 continue
7326
7327                                         if heuristic_overlap and running_root == task.root:
7328                                                 # Never uninstall sys-apps/portage or its essential
7329                                                 # dependencies, except through replacement.
7330                                                 try:
7331                                                         runtime_dep_atoms = \
7332                                                                 list(runtime_deps.iterAtomsForPackage(task))
7333                                                 except portage.exception.InvalidDependString, e:
7334                                                         portage.writemsg("!!! Invalid PROVIDE in " + \
7335                                                                 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
7336                                                                 (task.root, task.cpv, e), noiselevel=-1)
7337                                                         del e
7338                                                         continue
7339
7340                                                 # Don't uninstall a runtime dep if it appears
7341                                                 # to be the only suitable one installed.
7342                                                 skip = False
7343                                                 vardb = root_config.trees["vartree"].dbapi
7344                                                 for atom in runtime_dep_atoms:
7345                                                         other_version = None
7346                                                         for pkg in vardb.match_pkgs(atom):
7347                                                                 if pkg.cpv == task.cpv and \
7348                                                                         pkg.metadata["COUNTER"] == \
7349                                                                         task.metadata["COUNTER"]:
7350                                                                         continue
7351                                                                 other_version = pkg
7352                                                                 break
7353                                                         if other_version is None:
7354                                                                 skip = True
7355                                                                 break
7356                                                 if skip:
7357                                                         continue
7358
7359                                                 # For packages in the system set, don't take
7360                                                 # any chances. If the conflict can't be resolved
7361                                                 # by a normal replacement operation then abort.
7362                                                 skip = False
7363                                                 try:
7364                                                         for atom in root_config.sets[
7365                                                                 "system"].iterAtomsForPackage(task):
7366                                                                 skip = True
7367                                                                 break
7368                                                 except portage.exception.InvalidDependString, e:
7369                                                         portage.writemsg("!!! Invalid PROVIDE in " + \
7370                                                                 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
7371                                                                 (task.root, task.cpv, e), noiselevel=-1)
7372                                                         del e
7373                                                         skip = True
7374                                                 if skip:
7375                                                         continue
7376
7377                                         # Note that the world check isn't always
7378                                         # necessary since self._complete_graph() will
7379                                         # add all packages from the system and world sets to the
7380                                         # graph. This just allows unresolved conflicts to be
7381                                         # detected as early as possible, which makes it possible
7382                                         # to avoid calling self._complete_graph() when it is
7383                                         # unnecessary due to blockers triggering an abort.
7384                                         if not complete:
7385                                                 # For packages in the world set, go ahead and uninstall
7386                                                 # when necessary, as long as the atom will be satisfied
7387                                                 # in the final state.
7388                                                 graph_db = self.mydbapi[task.root]
7389                                                 skip = False
7390                                                 try:
7391                                                         for atom in root_config.sets[
7392                                                                 "world"].iterAtomsForPackage(task):
7393                                                                 satisfied = False
7394                                                                 for pkg in graph_db.match_pkgs(atom):
7395                                                                         if pkg == inst_pkg:
7396                                                                                 continue
7397                                                                         satisfied = True
7398                                                                         break
7399                                                                 if not satisfied:
7400                                                                         skip = True
7401                                                                         self._blocked_world_pkgs[inst_pkg] = atom
7402                                                                         break
7403                                                 except portage.exception.InvalidDependString, e:
7404                                                         portage.writemsg("!!! Invalid PROVIDE in " + \
7405                                                                 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
7406                                                                 (task.root, task.cpv, e), noiselevel=-1)
7407                                                         del e
7408                                                         skip = True
7409                                                 if skip:
7410                                                         continue
7411
7412                                         # Check the deps of parent nodes to ensure that
7413                                         # the chosen task produces a leaf node. Maybe
7414                                         # this can be optimized some more to make the
7415                                         # best possible choice, but the current algorithm
7416                                         # is simple and should be near optimal for most
7417                                         # common cases.
7418                                         mergeable_parent = False
7419                                         parent_deps = set()
7420                                         for parent in mygraph.parent_nodes(task):
7421                                                 parent_deps.update(mygraph.child_nodes(parent,
7422                                                         ignore_priority=priority_range.ignore_medium_soft))
7423                                                 if parent in mergeable_nodes and \
7424                                                         gather_deps(ignore_uninst_or_med_soft,
7425                                                         mergeable_nodes, set(), parent):
7426                                                         mergeable_parent = True
7427
7428                                         if not mergeable_parent:
7429                                                 continue
7430
7431                                         parent_deps.remove(task)
7432                                         if min_parent_deps is None or \
7433                                                 len(parent_deps) < min_parent_deps:
7434                                                 min_parent_deps = len(parent_deps)
7435                                                 uninst_task = task
7436
7437                                 if uninst_task is not None:
7438                                         # The uninstall is performed only after blocking
7439                                         # packages have been merged on top of it. File
7440                                         # collisions between blocking packages are detected
7441                                         # and removed from the list of files to be uninstalled.
7442                                         scheduled_uninstalls.add(uninst_task)
7443                                         parent_nodes = mygraph.parent_nodes(uninst_task)
7444
7445                                         # Reverse the parent -> uninstall edges since we want
7446                                         # to do the uninstall after blocking packages have
7447                                         # been merged on top of it.
7448                                         mygraph.remove(uninst_task)
7449                                         for blocked_pkg in parent_nodes:
7450                                                 mygraph.add(blocked_pkg, uninst_task,
7451                                                         priority=BlockerDepPriority.instance)
7452                                                 scheduler_graph.remove_edge(uninst_task, blocked_pkg)
7453                                                 scheduler_graph.add(blocked_pkg, uninst_task,
7454                                                         priority=BlockerDepPriority.instance)
7455
7456                                         # Reset the state variables for leaf node selection and
7457                                         # continue trying to select leaf nodes.
7458                                         prefer_asap = True
7459                                         drop_satisfied = False
7460                                         continue
7461
7462                         if not selected_nodes:
7463                                 # Only select root nodes as a last resort. This case should
7464                                 # only trigger when the graph is nearly empty and the only
7465                                 # remaining nodes are isolated (no parents or children). Since
7466                                 # the nodes must be isolated, ignore_priority is not needed.
7467                                 selected_nodes = get_nodes()
7468
7469                         if not selected_nodes and not drop_satisfied:
7470                                 drop_satisfied = True
7471                                 continue
7472
7473                         if not selected_nodes and not myblocker_uninstalls.is_empty():
7474                                 # If possible, drop an uninstall task here in order to avoid
7475                                 # the circular deps code path. The corresponding blocker will
7476                                 # still be counted as an unresolved conflict.
7477                                 uninst_task = None
7478                                 for node in myblocker_uninstalls.leaf_nodes():
7479                                         try:
7480                                                 mygraph.remove(node)
7481                                         except KeyError:
7482                                                 pass
7483                                         else:
7484                                                 uninst_task = node
7485                                                 ignored_uninstall_tasks.add(node)
7486                                                 break
7487
7488                                 if uninst_task is not None:
7489                                         # Reset the state variables for leaf node selection and
7490                                         # continue trying to select leaf nodes.
7491                                         prefer_asap = True
7492                                         drop_satisfied = False
7493                                         continue
7494
7495                         if not selected_nodes:
7496                                 self._circular_deps_for_display = mygraph
7497                                 raise self._unknown_internal_error()
7498
7499                         # At this point, we've succeeded in selecting one or more nodes, so
7500                         # reset state variables for leaf node selection.
7501                         prefer_asap = True
7502                         drop_satisfied = False
7503
7504                         mygraph.difference_update(selected_nodes)
7505
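                             # Append the selected nodes to the merge list. Along the way,
                             # note executed uninstalls, drop Uninstall tasks that are made
                             # obsolete by a slot replacement, and record blockers that a
                             # scheduled uninstall resolves.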
7506                         for node in selected_nodes:
7507                                 if isinstance(node, Package) and \
7508                                         node.operation == "nomerge":
7509                                         continue
7510
7511                                 # Handle interactions between blockers
7512                                 # and uninstallation tasks.
7513                                 solved_blockers = set()
7514                                 uninst_task = None
7515                                 if isinstance(node, Package) and \
7516                                         "uninstall" == node.operation:
7517                                         have_uninstall_task = True
7518                                         uninst_task = node
7519                                 else:
7520                                         vardb = self.trees[node.root]["vartree"].dbapi
7521                                         previous_cpv = vardb.match(node.slot_atom)
7522                                         if previous_cpv:
7523                                                 # The package will be replaced by this one, so remove
7524                                                 # the corresponding Uninstall task if necessary.
7525                                                 previous_cpv = previous_cpv[0]
7526                                                 uninst_task = \
7527                                                         ("installed", node.root, previous_cpv, "uninstall")
7528                                                 try:
7529                                                         mygraph.remove(uninst_task)
7530                                                 except KeyError:
7531                                                         pass
7532
7533                                 if uninst_task is not None and \
7534                                         uninst_task not in ignored_uninstall_tasks and \
7535                                         myblocker_uninstalls.contains(uninst_task):
7536                                         blocker_nodes = myblocker_uninstalls.parent_nodes(uninst_task)
7537                                         myblocker_uninstalls.remove(uninst_task)
7538                                         # Discard any blockers that this Uninstall solves.
7539                                         for blocker in blocker_nodes:
7540                                                 if not myblocker_uninstalls.child_nodes(blocker):
7541                                                         myblocker_uninstalls.remove(blocker)
7542                                                         solved_blockers.add(blocker)
7543
7544                                 retlist.append(node)
7545
7546                                 if (isinstance(node, Package) and \
7547                                         "uninstall" == node.operation) or \
7548                                         (uninst_task is not None and \
7549                                         uninst_task in scheduled_uninstalls):
7550                                         # Include satisfied blockers in the merge list
7551                                         # since the user might be interested, and it also
7552                                         # serves as an indicator that blocking packages
7553                                         # will be temporarily installed simultaneously.
7554                                         for blocker in solved_blockers:
7555                                                 retlist.append(Blocker(atom=blocker.atom,
7556                                                         root=blocker.root, eapi=blocker.eapi,
7557                                                         satisfied=True))
7558
7559                 unsolvable_blockers = set(self._unsolvable_blockers.leaf_nodes())
7560                 for node in myblocker_uninstalls.root_nodes():
7561                         unsolvable_blockers.add(node)
7562
7563                 for blocker in unsolvable_blockers:
7564                         retlist.append(blocker)
7565
7566                 # If any Uninstall tasks need to be executed in order
7567                 # to avoid a conflict, complete the graph with any
7568                 # dependencies that may have been initially
7569                 # neglected (to ensure that unsafe Uninstall tasks
7570                 # are properly identified and blocked from execution).
7571                 if have_uninstall_task and \
7572                         not complete and \
7573                         not unsolvable_blockers:
7574                         self.myparams.add("complete")
7575                         raise self._serialize_tasks_retry("")
7576
7577                 if unsolvable_blockers and \
7578                         not self._accept_blocker_conflicts():
7579                         self._unsatisfied_blockers_for_display = unsolvable_blockers
7580                         self._serialized_tasks_cache = retlist[:]
7581                         self._scheduler_graph = scheduler_graph
7582                         raise self._unknown_internal_error()
7583
7584                 if self._slot_collision_info and \
7585                         not self._accept_blocker_conflicts():
7586                         self._serialized_tasks_cache = retlist[:]
7587                         self._scheduler_graph = scheduler_graph
7588                         raise self._unknown_internal_error()
7589
7590                 return retlist, scheduler_graph
7591
7592         def _show_circular_deps(self, mygraph):
7593                 # No leaf nodes are available, so we have a circular
7594                 # dependency panic situation.  Reduce the noise level to a
7595                 # minimum via repeated elimination of root nodes since they
7596                 # have no parents and thus can not be part of a cycle.
7597                 while True:
7598                         root_nodes = mygraph.root_nodes(
7599                                 ignore_priority=DepPrioritySatisfiedRange.ignore_medium_soft)
7600                         if not root_nodes:
7601                                 break
7602                         mygraph.difference_update(root_nodes)
7603                 # Display the USE flags that are enabled on nodes that are part
7604                 # of dependency cycles in case that helps the user decide to
7605                 # disable some of them.
7606                 display_order = []
7607                 tempgraph = mygraph.copy()
7608                 while not tempgraph.empty():
7609                         nodes = tempgraph.leaf_nodes()
7610                         if not nodes:
7611                                 node = tempgraph.order[0]
7612                         else:
7613                                 node = nodes[0]
7614                         display_order.append(node)
7615                         tempgraph.remove(node)
7616                 display_order.reverse()
7617                 self.myopts.pop("--quiet", None)
7618                 self.myopts.pop("--verbose", None)
7619                 self.myopts["--tree"] = True
7620                 portage.writemsg("\n\n", noiselevel=-1)
7621                 self.display(display_order)
7622                 prefix = colorize("BAD", " * ")
7623                 portage.writemsg("\n", noiselevel=-1)
7624                 portage.writemsg(prefix + "Error: circular dependencies:\n",
7625                         noiselevel=-1)
7626                 portage.writemsg("\n", noiselevel=-1)
7627                 mygraph.debug_print()
7628                 portage.writemsg("\n", noiselevel=-1)
7629                 portage.writemsg(prefix + "Note that circular dependencies " + \
7630                         "can often be avoided by temporarily\n", noiselevel=-1)
7631                 portage.writemsg(prefix + "disabling USE flags that trigger " + \
7632                         "optional dependencies.\n", noiselevel=-1)
7633
7634         def _show_merge_list(self):
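                     # Show the cached merge list again, unless that exact list (or its
                     # reverse, as used for --tree output) has already been displayed.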
7635                 if self._serialized_tasks_cache is not None and \
7636                         not (self._displayed_list and \
7637                         (self._displayed_list == self._serialized_tasks_cache or \
7638                         self._displayed_list == \
7639                                 list(reversed(self._serialized_tasks_cache)))):
7640                         display_list = self._serialized_tasks_cache[:]
7641                         if "--tree" in self.myopts:
7642                                 display_list.reverse()
7643                         self.display(display_list)
7644
7645         def _show_unsatisfied_blockers(self, blockers):
7646                 self._show_merge_list()
7647                 msg = "Error: The above package list contains " + \
7648                         "packages which cannot be installed " + \
7649                         "at the same time on the same system."
7650                 prefix = colorize("BAD", " * ")
7651                 from textwrap import wrap
7652                 portage.writemsg("\n", noiselevel=-1)
7653                 for line in wrap(msg, 70):
7654                         portage.writemsg(prefix + line + "\n", noiselevel=-1)
7655
7656                 # Display the conflicting packages along with the packages
7657                 # that pulled them in. This is helpful for troubleshooting
7658                 # cases in which blockers aren't resolved automatically and
7659                 # the reasons are not apparent from the normal merge list
7660                 # display.
7661
7662                 conflict_pkgs = {}
7663                 for blocker in blockers:
7664                         for pkg in chain(self._blocked_pkgs.child_nodes(blocker), \
7665                                 self._blocker_parents.parent_nodes(blocker)):
7666                                 parent_atoms = self._parent_atoms.get(pkg)
7667                                 if not parent_atoms:
7668                                         atom = self._blocked_world_pkgs.get(pkg)
7669                                         if atom is not None:
7670                                                 parent_atoms = set([("@world", atom)])
7671                                 if parent_atoms:
7672                                         conflict_pkgs[pkg] = parent_atoms
7673
7674                 if conflict_pkgs:
7675                         # Reduce noise by pruning packages that are only
7676                         # pulled in by other conflict packages.
7677                         pruned_pkgs = set()
7678                         for pkg, parent_atoms in conflict_pkgs.iteritems():
7679                                 relevant_parent = False
7680                                 for parent, atom in parent_atoms:
7681                                         if parent not in conflict_pkgs:
7682                                                 relevant_parent = True
7683                                                 break
7684                                 if not relevant_parent:
7685                                         pruned_pkgs.add(pkg)
7686                         for pkg in pruned_pkgs:
7687                                 del conflict_pkgs[pkg]
7688
7689                 if conflict_pkgs:
7690                         msg = []
7691                         msg.append("\n")
7692                         indent = "  "
7693                         # Max number of parents shown, to avoid flooding the display.
7694                         max_parents = 3
7695                         for pkg, parent_atoms in conflict_pkgs.iteritems():
7696
7697                                 pruned_list = set()
7698
7699                                 # Prefer parents that are not directly involved in a conflict.
7700                                 for parent_atom in parent_atoms:
7701                                         if len(pruned_list) >= max_parents:
7702                                                 break
7703                                         parent, atom = parent_atom
7704                                         if parent not in conflict_pkgs:
7705                                                 pruned_list.add(parent_atom)
7706
7707                                 for parent_atom in parent_atoms:
7708                                         if len(pruned_list) >= max_parents:
7709                                                 break
7710                                         pruned_list.add(parent_atom)
7711
7712                                 omitted_parents = len(parent_atoms) - len(pruned_list)
7713                                 msg.append(indent + "%s pulled in by\n" % pkg)
7714
7715                                 for parent_atom in pruned_list:
7716                                         parent, atom = parent_atom
7717                                         msg.append(2*indent)
7718                                         if isinstance(parent,
7719                                                 (PackageArg, AtomArg)):
7720                                                 # For PackageArg and AtomArg types, it's
7721                                                 # redundant to display the atom attribute.
7722                                                 msg.append(str(parent))
7723                                         else:
7724                                                 # Display the specific atom from SetArg or
7725                                                 # Package types.
7726                                                 msg.append("%s required by %s" % (atom, parent))
7727                                         msg.append("\n")
7728
7729                                 if omitted_parents:
7730                                         msg.append(2*indent)
7731                                         msg.append("(and %d more)\n" % omitted_parents)
7732
7733                                 msg.append("\n")
7734
7735                         sys.stderr.write("".join(msg))
7736                         sys.stderr.flush()
7737
7738                 if "--quiet" not in self.myopts:
7739                         show_blocker_docs_link()
7740
7741         def display(self, mylist, favorites=[], verbosity=None):
7742
7743                 # This is used to prevent display_problems() from
7744                 # redundantly displaying this exact same merge list
7745                 # again via _show_merge_list().
7746                 self._displayed_list = mylist
7747
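                     # Verbosity levels: 1 with --quiet, 3 with --verbose, otherwise 2.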
7748                 if verbosity is None:
7749                         verbosity = ("--quiet" in self.myopts and 1 or \
7750                                 "--verbose" in self.myopts and 3 or 2)
7751                 favorites_set = InternalPackageSet(favorites)
7752                 oneshot = "--oneshot" in self.myopts or \
7753                         "--onlydeps" in self.myopts
7754                 columns = "--columns" in self.myopts
7755                 changelogs=[]
7756                 p=[]
7757                 blockers = []
7758
7759                 counters = PackageCounters()
7760
7761                 if verbosity == 1 and "--verbose" not in self.myopts:
7762                         def create_use_string(*args):
7763                                 return ""
7764                 else:
7765                         def create_use_string(name, cur_iuse, iuse_forced, cur_use,
7766                                 old_iuse, old_use,
7767                                 is_new, reinst_flags,
7768                                 all_flags=(verbosity == 3 or "--quiet" in self.myopts),
7769                                 alphabetical=("--alphabetical" in self.myopts)):
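                                     # Build the NAME="..." string shown for one package.
                                     # Decorations used on individual flags:
                                     #   *   state differs from the installed version
                                     #   %   flag was added to or removed from IUSE
                                     #   ()  flag is forced/masked or was removed from IUSE
                                     # Illustrative example of the result:
                                     #   USE="acl* -doc% (-selinux)"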
7770                                 enabled = []
7771                                 if alphabetical:
7772                                         disabled = enabled
7773                                         removed = enabled
7774                                 else:
7775                                         disabled = []
7776                                         removed = []
7777                                 cur_iuse = set(cur_iuse)
7778                                 enabled_flags = cur_iuse.intersection(cur_use)
7779                                 removed_iuse = set(old_iuse).difference(cur_iuse)
7780                                 any_iuse = cur_iuse.union(old_iuse)
7781                                 any_iuse = list(any_iuse)
7782                                 any_iuse.sort()
7783                                 for flag in any_iuse:
7784                                         flag_str = None
7785                                         isEnabled = False
7786                                         reinst_flag = reinst_flags and flag in reinst_flags
7787                                         if flag in enabled_flags:
7788                                                 isEnabled = True
7789                                                 if is_new or flag in old_use and \
7790                                                         (all_flags or reinst_flag):
7791                                                         flag_str = red(flag)
7792                                                 elif flag not in old_iuse:
7793                                                         flag_str = yellow(flag) + "%*"
7794                                                 elif flag not in old_use:
7795                                                         flag_str = green(flag) + "*"
7796                                         elif flag in removed_iuse:
7797                                                 if all_flags or reinst_flag:
7798                                                         flag_str = yellow("-" + flag) + "%"
7799                                                         if flag in old_use:
7800                                                                 flag_str += "*"
7801                                                         flag_str = "(" + flag_str + ")"
7802                                                         removed.append(flag_str)
7803                                                 continue
7804                                         else:
7805                                                 if is_new or flag in old_iuse and \
7806                                                         flag not in old_use and \
7807                                                         (all_flags or reinst_flag):
7808                                                         flag_str = blue("-" + flag)
7809                                                 elif flag not in old_iuse:
7810                                                         flag_str = yellow("-" + flag)
7811                                                         if flag not in iuse_forced:
7812                                                                 flag_str += "%"
7813                                                 elif flag in old_use:
7814                                                         flag_str = green("-" + flag) + "*"
7815                                         if flag_str:
7816                                                 if flag in iuse_forced:
7817                                                         flag_str = "(" + flag_str + ")"
7818                                                 if isEnabled:
7819                                                         enabled.append(flag_str)
7820                                                 else:
7821                                                         disabled.append(flag_str)
7822
7823                                 if alphabetical:
7824                                         ret = " ".join(enabled)
7825                                 else:
7826                                         ret = " ".join(enabled + disabled + removed)
7827                                 if ret:
7828                                         ret = '%s="%s" ' % (name, ret)
7829                                 return ret
7830
7831                 repo_display = RepoDisplay(self.roots)
7832
7833                 tree_nodes = []
7834                 display_list = []
7835                 mygraph = self.digraph.copy()
7836
7837                 # If there are any Uninstall instances, add the corresponding
7838                 # blockers to the digraph (useful for --tree display).
7839
7840                 executed_uninstalls = set(node for node in mylist \
7841                         if isinstance(node, Package) and node.operation == "unmerge")
7842
7843                 for uninstall in self._blocker_uninstalls.leaf_nodes():
7844                         uninstall_parents = \
7845                                 self._blocker_uninstalls.parent_nodes(uninstall)
7846                         if not uninstall_parents:
7847                                 continue
7848
7849                         # Remove the corresponding "nomerge" node and substitute
7850                         # the Uninstall node.
7851                         inst_pkg = self._pkg_cache[
7852                                 ("installed", uninstall.root, uninstall.cpv, "nomerge")]
7853                         try:
7854                                 mygraph.remove(inst_pkg)
7855                         except KeyError:
7856                                 pass
7857
7858                         try:
7859                                 inst_pkg_blockers = self._blocker_parents.child_nodes(inst_pkg)
7860                         except KeyError:
7861                                 inst_pkg_blockers = []
7862
7863                         # Break the Package -> Uninstall edges.
7864                         mygraph.remove(uninstall)
7865
7866                         # Resolution of a package's blockers
7867                         # depends on its own uninstallation.
7868                         for blocker in inst_pkg_blockers:
7869                                 mygraph.add(uninstall, blocker)
7870
7871                         # Expand Package -> Uninstall edges into
7872                         # Package -> Blocker -> Uninstall edges.
7873                         for blocker in uninstall_parents:
7874                                 mygraph.add(uninstall, blocker)
7875                                 for parent in self._blocker_parents.parent_nodes(blocker):
7876                                         if parent != inst_pkg:
7877                                                 mygraph.add(blocker, parent)
7878
7879                         # If the uninstall task did not need to be executed because
7880                         # of an upgrade, display Blocker -> Upgrade edges since the
7881                         # corresponding Blocker -> Uninstall edges will not be shown.
7882                         upgrade_node = \
7883                                 self._slot_pkg_map[uninstall.root].get(uninstall.slot_atom)
7884                         if upgrade_node is not None and \
7885                                 uninstall not in executed_uninstalls:
7886                                 for blocker in uninstall_parents:
7887                                         mygraph.add(upgrade_node, blocker)
7888
7889                 unsatisfied_blockers = []
7890                 i = 0
7891                 depth = 0
7892                 shown_edges = set()
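                     # Build the rows to display. With --tree, compute each entry's depth
                     # relative to the preceding entries and, when an entry's parent is not
                     # adjacent in the list, walk up the graph to fill in ancestor rows.
                     # Unsatisfied blockers are collected separately and appended afterwards.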
7893                 for x in mylist:
7894                         if isinstance(x, Blocker) and not x.satisfied:
7895                                 unsatisfied_blockers.append(x)
7896                                 continue
7897                         graph_key = x
7898                         if "--tree" in self.myopts:
7899                                 depth = len(tree_nodes)
7900                                 while depth and graph_key not in \
7901                                         mygraph.child_nodes(tree_nodes[depth-1]):
7902                                                 depth -= 1
7903                                 if depth:
7904                                         tree_nodes = tree_nodes[:depth]
7905                                         tree_nodes.append(graph_key)
7906                                         display_list.append((x, depth, True))
7907                                         shown_edges.add((graph_key, tree_nodes[depth-1]))
7908                                 else:
7909                                         traversed_nodes = set() # prevent endless cycles
7910                                         traversed_nodes.add(graph_key)
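                                             # Walk up the dependency graph from this entry so the
                                             # --tree output can show it indented beneath a chain of
                                             # the parents that pulled it in, while avoiding edges
                                             # that were already shown and direct cycles.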
7911                                         def add_parents(current_node, ordered):
7912                                                 parent_nodes = None
7913                                                 # Do not traverse to parents if this node is
7914                                                 # an argument or a direct member of a set that has
7915                                                 # been specified as an argument (system or world).
7916                                                 if current_node not in self._set_nodes:
7917                                                         parent_nodes = mygraph.parent_nodes(current_node)
7918                                                 if parent_nodes:
7919                                                         child_nodes = set(mygraph.child_nodes(current_node))
7920                                                         selected_parent = None
7921                                                         # First, try to avoid a direct cycle.
7922                                                         for node in parent_nodes:
7923                                                                 if not isinstance(node, (Blocker, Package)):
7924                                                                         continue
7925                                                                 if node not in traversed_nodes and \
7926                                                                         node not in child_nodes:
7927                                                                         edge = (current_node, node)
7928                                                                         if edge in shown_edges:
7929                                                                                 continue
7930                                                                         selected_parent = node
7931                                                                         break
7932                                                         if not selected_parent:
7933                                                                 # A direct cycle is unavoidable.
7934                                                                 for node in parent_nodes:
7935                                                                         if not isinstance(node, (Blocker, Package)):
7936                                                                                 continue
7937                                                                         if node not in traversed_nodes:
7938                                                                                 edge = (current_node, node)
7939                                                                                 if edge in shown_edges:
7940                                                                                         continue
7941                                                                                 selected_parent = node
7942                                                                                 break
7943                                                         if selected_parent:
7944                                                                 shown_edges.add((current_node, selected_parent))
7945                                                                 traversed_nodes.add(selected_parent)
7946                                                                 add_parents(selected_parent, False)
7947                                                 display_list.append((current_node,
7948                                                         len(tree_nodes), ordered))
7949                                                 tree_nodes.append(current_node)
7950                                         tree_nodes = []
7951                                         add_parents(graph_key, True)
7952                         else:
7953                                 display_list.append((x, depth, True))
7954                 mylist = display_list
7955                 for x in unsatisfied_blockers:
7956                         mylist.append((x, 0, True))
7957
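                     # Prune the tree-filled list from the bottom up: drop consecutive
                     # duplicate rows created while ancestors were filled in, and drop
                     # "nomerge" ancestor rows with no merge operation displayed beneath
                     # them at a greater depth.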
7958                 last_merge_depth = 0
7959                 for i in xrange(len(mylist)-1,-1,-1):
7960                         graph_key, depth, ordered = mylist[i]
7961                         if not ordered and depth == 0 and i > 0 \
7962                                 and graph_key == mylist[i-1][0] and \
7963                                 mylist[i-1][1] == 0:
7964                                 # An ordered node got a consecutive duplicate when the tree was
7965                                 # being filled in.
7966                                 del mylist[i]
7967                                 continue
7968                         if ordered and graph_key[-1] != "nomerge":
7969                                 last_merge_depth = depth
7970                                 continue
7971                         if depth >= last_merge_depth or \
7972                                 i < len(mylist) - 1 and \
7973                                 depth >= mylist[i+1][1]:
7974                                         del mylist[i]
7975
7976                 from portage import flatten
7977                 from portage.dep import use_reduce, paren_reduce
7978                 # files to fetch list - avoids counting the same file twice
7979                 # in size display (verbose mode)
7980                 myfetchlist=[]
7981
7982                 # Use this set to detect when all the "repoadd" strings are "[0]"
7983                 # and disable the entire repo display in this case.
7984                 repoadd_set = set()
7985
7986                 for mylist_index in xrange(len(mylist)):
7987                         x, depth, ordered = mylist[mylist_index]
7988                         pkg_type = x[0]
7989                         myroot = x[1]
7990                         pkg_key = x[2]
7991                         portdb = self.trees[myroot]["porttree"].dbapi
7992                         bindb  = self.trees[myroot]["bintree"].dbapi
7993                         vardb = self.trees[myroot]["vartree"].dbapi
7994                         vartree = self.trees[myroot]["vartree"]
7995                         pkgsettings = self.pkgsettings[myroot]
7996
7997                         fetch=" "
7998                         indent = " " * depth
7999
8000                         if isinstance(x, Blocker):
8001                                 if x.satisfied:
8002                                         blocker_style = "PKG_BLOCKER_SATISFIED"
8003                                         addl = "%s  %s  " % (colorize(blocker_style, "b"), fetch)
8004                                 else:
8005                                         blocker_style = "PKG_BLOCKER"
8006                                         addl = "%s  %s  " % (colorize(blocker_style, "B"), fetch)
8007                                 if ordered:
8008                                         counters.blocks += 1
8009                                         if x.satisfied:
8010                                                 counters.blocks_satisfied += 1
8011                                 resolved = portage.key_expand(
8012                                         str(x.atom).lstrip("!"), mydb=vardb, settings=pkgsettings)
8013                                 if "--columns" in self.myopts and "--quiet" in self.myopts:
8014                                         addl += " " + colorize(blocker_style, resolved)
8015                                 else:
8016                                         addl = "[%s %s] %s%s" % \
8017                                                 (colorize(blocker_style, "blocks"),
8018                                                 addl, indent, colorize(blocker_style, resolved))
8019                                 block_parents = self._blocker_parents.parent_nodes(x)
8020                                 block_parents = set([pnode[2] for pnode in block_parents])
8021                                 block_parents = ", ".join(block_parents)
8022                                 if resolved!=x[2]:
8023                                         addl += colorize(blocker_style,
8024                                                 " (\"%s\" is blocking %s)") % \
8025                                                 (str(x.atom).lstrip("!"), block_parents)
8026                                 else:
8027                                         addl += colorize(blocker_style,
8028                                                 " (is blocking %s)") % block_parents
8029                                 if isinstance(x, Blocker) and x.satisfied:
8030                                         if columns:
8031                                                 continue
8032                                         p.append(addl)
8033                                 else:
8034                                         blockers.append(addl)
8035                         else:
8036                                 pkg_status = x[3]
8037                                 pkg_merge = ordered and pkg_status == "merge"
8038                                 if not pkg_merge and pkg_status == "merge":
8039                                         pkg_status = "nomerge"
8040                                 built = pkg_type != "ebuild"
8041                                 installed = pkg_type == "installed"
8042                                 pkg = x
8043                                 metadata = pkg.metadata
8044                                 ebuild_path = None
8045                                 repo_name = metadata["repository"]
8046                                 if pkg_type == "ebuild":
8047                                         ebuild_path = portdb.findname(pkg_key)
8048                                         if not ebuild_path: # shouldn't happen
8049                                                 raise portage.exception.PackageNotFound(pkg_key)
8050                                         repo_path_real = os.path.dirname(os.path.dirname(
8051                                                 os.path.dirname(ebuild_path)))
8052                                 else:
8053                                         repo_path_real = portdb.getRepositoryPath(repo_name)
8054                                 pkg_use = list(pkg.use.enabled)
8055                                 try:
8056                                         restrict = flatten(use_reduce(paren_reduce(
8057                                                 pkg.metadata["RESTRICT"]), uselist=pkg_use))
8058                                 except portage.exception.InvalidDependString, e:
8059                                         if not pkg.installed:
8060                                                 show_invalid_depstring_notice(x,
8061                                                         pkg.metadata["RESTRICT"], str(e))
8062                                                 del e
8063                                                 return 1
8064                                         restrict = []
8065                                 if "ebuild" == pkg_type and x[3] != "nomerge" and \
8066                                         "fetch" in restrict:
8067                                         fetch = red("F")
8068                                         if ordered:
8069                                                 counters.restrict_fetch += 1
8070                                         if portdb.fetch_check(pkg_key, pkg_use):
8071                                                 fetch = green("f")
8072                                                 if ordered:
8073                                                         counters.restrict_fetch_satisfied += 1
8074
8075                                 # We need to use "--emptytree" testing here rather than "empty" param testing because "empty"
8076                                 # param is used for -u, where you still *do* want to see when something is being upgraded.
8077                                 myoldbest = []
8078                                 myinslotlist = None
8079                                 installed_versions = vardb.match(portage.cpv_getkey(pkg_key))
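                                     # Classify the operation relative to the installed packages:
                                     #   R  - reinstall of the exact version already installed
                                     #   U  - upgrade within an existing slot (UD for a downgrade)
                                     #   NS - first package to be installed in a new slot
                                     #   N  - package that is not installed at all yet
                                     # These letters appear in output such as "[ebuild  N    ]"
                                     # (illustrative).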
8080                                 if vardb.cpv_exists(pkg_key):
8081                                         addl="  "+yellow("R")+fetch+"  "
8082                                         if ordered:
8083                                                 if pkg_merge:
8084                                                         counters.reinst += 1
8085                                                 elif pkg_status == "uninstall":
8086                                                         counters.uninst += 1
8087                                 # filter out old-style virtual matches
8088                                 elif installed_versions and \
8089                                         portage.cpv_getkey(installed_versions[0]) == \
8090                                         portage.cpv_getkey(pkg_key):
8091                                         myinslotlist = vardb.match(pkg.slot_atom)
8092                                         # If this is the first install of a new-style virtual, we
8093                                         # need to filter out old-style virtual matches.
8094                                         if myinslotlist and \
8095                                                 portage.cpv_getkey(myinslotlist[0]) != \
8096                                                 portage.cpv_getkey(pkg_key):
8097                                                 myinslotlist = None
8098                                         if myinslotlist:
8099                                                 myoldbest = myinslotlist[:]
8100                                                 addl = "   " + fetch
8101                                                 if not portage.dep.cpvequal(pkg_key,
8102                                                         portage.best([pkg_key] + myoldbest)):
8103                                                         # Downgrade in slot
8104                                                         addl += turquoise("U")+blue("D")
8105                                                         if ordered:
8106                                                                 counters.downgrades += 1
8107                                                 else:
8108                                                         # Update in slot
8109                                                         addl += turquoise("U") + " "
8110                                                         if ordered:
8111                                                                 counters.upgrades += 1
8112                                         else:
8113                                                 # New slot, mark it new.
8114                                                 addl = " " + green("NS") + fetch + "  "
8115                                                 myoldbest = vardb.match(portage.cpv_getkey(pkg_key))
8116                                                 if ordered:
8117                                                         counters.newslot += 1
8118
8119                                         if "--changelog" in self.myopts:
8120                                                 inst_matches = vardb.match(pkg.slot_atom)
8121                                                 if inst_matches:
8122                                                         changelogs.extend(self.calc_changelog(
8123                                                                 portdb.findname(pkg_key),
8124                                                                 inst_matches[0], pkg_key))
8125                                 else:
8126                                         addl = " " + green("N") + " " + fetch + "  "
8127                                         if ordered:
8128                                                 counters.new += 1
8129
8130                                 verboseadd = ""
8131                                 repoadd = None
8132
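                                     # The "if True:" wrapper below looks like an indentation-preserving
                                     # placeholder; its body builds the USE / USE_EXPAND flag string that
                                     # is appended to the package line via verboseadd.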
8133                                 if True:
8134                                         # USE flag display
8135                                         forced_flags = set()
8136                                         pkgsettings.setcpv(pkg) # for package.use.{mask,force}
8137                                         forced_flags.update(pkgsettings.useforce)
8138                                         forced_flags.update(pkgsettings.usemask)
8139
8140                                         cur_use = [flag for flag in pkg.use.enabled \
8141                                                 if flag in pkg.iuse.all]
8142                                         cur_iuse = sorted(pkg.iuse.all)
8143
8144                                         if myoldbest and myinslotlist:
8145                                                 previous_cpv = myoldbest[0]
8146                                         else:
8147                                                 previous_cpv = pkg.cpv
8148                                         if vardb.cpv_exists(previous_cpv):
8149                                                 old_iuse, old_use = vardb.aux_get(
8150                                                                 previous_cpv, ["IUSE", "USE"])
8151                                                 old_iuse = list(set(
8152                                                         filter_iuse_defaults(old_iuse.split())))
8153                                                 old_iuse.sort()
8154                                                 old_use = old_use.split()
8155                                                 is_new = False
8156                                         else:
8157                                                 old_iuse = []
8158                                                 old_use = []
8159                                                 is_new = True
8160
8161                                         old_use = [flag for flag in old_use if flag in old_iuse]
8162
8163                                         use_expand = pkgsettings["USE_EXPAND"].lower().split()
8164                                         use_expand.sort()
8165                                         use_expand.reverse()
8166                                         use_expand_hidden = \
8167                                                 pkgsettings["USE_EXPAND_HIDDEN"].lower().split()
8168
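                                             # map_to_use_expand() splits a flat list of USE flags into
                                             # per-USE_EXPAND buckets (keyed by expand name, prefix stripped),
                                             # leaving the remainder under "USE". With forcedFlags=True it also
                                             # returns the forced subset of each bucket.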
8169                                         def map_to_use_expand(myvals, forcedFlags=False,
8170                                                 removeHidden=True):
8171                                                 ret = {}
8172                                                 forced = {}
8173                                                 for exp in use_expand:
8174                                                         ret[exp] = []
8175                                                         forced[exp] = set()
8176                                                         for val in myvals[:]:
8177                                                                 if val.startswith(exp.lower()+"_"):
8178                                                                         if val in forced_flags:
8179                                                                                 forced[exp].add(val[len(exp)+1:])
8180                                                                         ret[exp].append(val[len(exp)+1:])
8181                                                                         myvals.remove(val)
8182                                                 ret["USE"] = myvals
8183                                                 forced["USE"] = [val for val in myvals \
8184                                                         if val in forced_flags]
8185                                                 if removeHidden:
8186                                                         for exp in use_expand_hidden:
8187                                                                 ret.pop(exp, None)
8188                                                 if forcedFlags:
8189                                                         return ret, forced
8190                                                 return ret
8191
8192                                         # Prevent USE_EXPAND_HIDDEN flags from being hidden if they
8193                                         # are the only thing that triggered reinstallation.
8194                                         reinst_flags_map = {}
8195                                         reinstall_for_flags = self._reinstall_nodes.get(pkg)
8196                                         reinst_expand_map = None
8197                                         if reinstall_for_flags:
8198                                                 reinst_flags_map = map_to_use_expand(
8199                                                         list(reinstall_for_flags), removeHidden=False)
8200                                                 for k in list(reinst_flags_map):
8201                                                         if not reinst_flags_map[k]:
8202                                                                 del reinst_flags_map[k]
8203                                                 if not reinst_flags_map.get("USE"):
8204                                                         reinst_expand_map = reinst_flags_map.copy()
8205                                                         reinst_expand_map.pop("USE", None)
8206                                         if reinst_expand_map and \
8207                                                 not set(reinst_expand_map).difference(
8208                                                 use_expand_hidden):
8209                                                 use_expand_hidden = \
8210                                                         set(use_expand_hidden).difference(
8211                                                         reinst_expand_map)
8212
8213                                         cur_iuse_map, iuse_forced = \
8214                                                 map_to_use_expand(cur_iuse, forcedFlags=True)
8215                                         cur_use_map = map_to_use_expand(cur_use)
8216                                         old_iuse_map = map_to_use_expand(old_iuse)
8217                                         old_use_map = map_to_use_expand(old_use)
8218
8219                                         use_expand.sort()
8220                                         use_expand.insert(0, "USE")
8221
8222                                         for key in use_expand:
8223                                                 if key in use_expand_hidden:
8224                                                         continue
8225                                                 verboseadd += create_use_string(key.upper(),
8226                                                         cur_iuse_map[key], iuse_forced[key],
8227                                                         cur_use_map[key], old_iuse_map[key],
8228                                                         old_use_map[key], is_new,
8229                                                         reinst_flags_map.get(key))
8230
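                                     # With --verbose (verbosity == 3), also show the size of any files
                                     # still to be fetched and the repository (overlay) the package comes
                                     # from, including an "old=>new" marker when the repository changes.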
8231                                 if verbosity == 3:
8232                                         # size verbose
8233                                         mysize=0
8234                                         if pkg_type == "ebuild" and pkg_merge:
8235                                                 try:
8236                                                         myfilesdict = portdb.getfetchsizes(pkg_key,
8237                                                                 useflags=pkg_use, debug=self.edebug)
8238                                                 except portage.exception.InvalidDependString, e:
8239                                                         src_uri = portdb.aux_get(pkg_key, ["SRC_URI"])[0]
8240                                                         show_invalid_depstring_notice(x, src_uri, str(e))
8241                                                         del e
8242                                                         return 1
8243                                                 if myfilesdict is None:
8244                                                         myfilesdict="[empty/missing/bad digest]"
8245                                                 else:
8246                                                         for myfetchfile in myfilesdict:
8247                                                                 if myfetchfile not in myfetchlist:
8248                                                                         mysize+=myfilesdict[myfetchfile]
8249                                                                         myfetchlist.append(myfetchfile)
8250                                                         if ordered:
8251                                                                 counters.totalsize += mysize
8252                                                 verboseadd += format_size(mysize)
8253
8254                                         # overlay verbose
8255                                         # assign index for a previous version in the same slot
8256                                         has_previous = False
8257                                         repo_name_prev = None
8258                                         slot_atom = "%s:%s" % (portage.dep_getkey(pkg_key),
8259                                                 metadata["SLOT"])
8260                                         slot_matches = vardb.match(slot_atom)
8261                                         if slot_matches:
8262                                                 has_previous = True
8263                                                 repo_name_prev = vardb.aux_get(slot_matches[0],
8264                                                         ["repository"])[0]
8265
8266                                         # now use the data to generate output
8267                                         if pkg.installed or not has_previous:
8268                                                 repoadd = repo_display.repoStr(repo_path_real)
8269                                         else:
8270                                                 repo_path_prev = None
8271                                                 if repo_name_prev:
8272                                                         repo_path_prev = portdb.getRepositoryPath(
8273                                                                 repo_name_prev)
8274                                                 if repo_path_prev == repo_path_real:
8275                                                         repoadd = repo_display.repoStr(repo_path_real)
8276                                                 else:
8277                                                         repoadd = "%s=>%s" % (
8278                                                                 repo_display.repoStr(repo_path_prev),
8279                                                                 repo_display.repoStr(repo_path_real))
8280                                         if repoadd:
8281                                                 repoadd_set.add(repoadd)
8282
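                                     # Split the cpv into name, version and revision for display,
                                     # dropping the implicit -r0 revision suffix.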
8283                                 xs = [portage.cpv_getkey(pkg_key)] + \
8284                                         list(portage.catpkgsplit(pkg_key)[2:])
8285                                 if xs[2] == "r0":
8286                                         xs[2] = ""
8287                                 else:
8288                                         xs[2] = "-" + xs[2]
8289
8290                                 mywidth = 130
8291                                 if "COLUMNWIDTH" in self.settings:
8292                                         try:
8293                                                 mywidth = int(self.settings["COLUMNWIDTH"])
8294                                         except ValueError, e:
8295                                                 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
8296                                                 portage.writemsg(
8297                                                         "!!! Unable to parse COLUMNWIDTH='%s'\n" % \
8298                                                         self.settings["COLUMNWIDTH"], noiselevel=-1)
8299                                                 del e
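                                     # oldlp and newlp are the column offsets used below (via nc_len
                                     # padding) to align the version and old-version fields in
                                     # --columns output.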
8300                                 oldlp = mywidth - 30
8301                                 newlp = oldlp - 30
8302
8303                                 # Convert myoldbest from a list to a string.
8304                                 if not myoldbest:
8305                                         myoldbest = ""
8306                                 else:
8307                                         for pos, key in enumerate(myoldbest):
8308                                                 key = portage.catpkgsplit(key)[2] + \
8309                                                         "-" + portage.catpkgsplit(key)[3]
8310                                                 if key[-3:] == "-r0":
8311                                                         key = key[:-3]
8312                                                 myoldbest[pos] = key
8313                                         myoldbest = blue("["+", ".join(myoldbest)+"]")
8314
8315                                 pkg_cp = xs[0]
8316                                 root_config = self.roots[myroot]
8317                                 system_set = root_config.sets["system"]
8318                                 world_set  = root_config.sets["world"]
8319
8320                                 pkg_system = False
8321                                 pkg_world = False
8322                                 try:
8323                                         pkg_system = system_set.findAtomForPackage(pkg)
8324                                         pkg_world  = world_set.findAtomForPackage(pkg)
8325                                         if not (oneshot or pkg_world) and \
8326                                                 myroot == self.target_root and \
8327                                                 favorites_set.findAtomForPackage(pkg):
8328                                                 # Maybe it will be added to world now.
8329                                                 if create_world_atom(pkg, favorites_set, root_config):
8330                                                         pkg_world = True
8331                                 except portage.exception.InvalidDependString:
8332                                         # This is reported elsewhere if relevant.
8333                                         pass
8334
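                                     # pkgprint() picks the color class for a package string based on
                                     # whether it is being merged or uninstalled and whether it belongs
                                     # to the system or world set.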
8335                                 def pkgprint(pkg_str):
8336                                         if pkg_merge:
8337                                                 if pkg_system:
8338                                                         return colorize("PKG_MERGE_SYSTEM", pkg_str)
8339                                                 elif pkg_world:
8340                                                         return colorize("PKG_MERGE_WORLD", pkg_str)
8341                                                 else:
8342                                                         return colorize("PKG_MERGE", pkg_str)
8343                                         elif pkg_status == "uninstall":
8344                                                 return colorize("PKG_UNINSTALL", pkg_str)
8345                                         else:
8346                                                 if pkg_system:
8347                                                         return colorize("PKG_NOMERGE_SYSTEM", pkg_str)
8348                                                 elif pkg_world:
8349                                                         return colorize("PKG_NOMERGE_WORLD", pkg_str)
8350                                                 else:
8351                                                         return colorize("PKG_NOMERGE", pkg_str)
8352
8353                                 try:
8354                                         properties = flatten(use_reduce(paren_reduce(
8355                                                 pkg.metadata["PROPERTIES"]), uselist=pkg.use.enabled))
8356                                 except portage.exception.InvalidDependString, e:
8357                                         if not pkg.installed:
8358                                                 show_invalid_depstring_notice(pkg,
8359                                                         pkg.metadata["PROPERTIES"], str(e))
8360                                                 del e
8361                                                 return 1
8362                                         properties = []
8363                                 interactive = "interactive" in properties
8364                                 if interactive and pkg.operation == "merge":
8365                                         addl = colorize("WARN", "I") + addl[1:]
8366                                         if ordered:
8367                                                 counters.interactive += 1
8368
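                                     # x[1] is the target root; packages destined for a ROOT other than
                                     # "/" get the destination root appended to the output line.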
8369                                 if x[1]!="/":
8370                                         if myoldbest:
8371                                                 myoldbest +=" "
8372                                         if "--columns" in self.myopts:
8373                                                 if "--quiet" in self.myopts:
8374                                                         myprint=addl+" "+indent+pkgprint(pkg_cp)
8375                                                         myprint=myprint+darkblue(" "+xs[1]+xs[2])+" "
8376                                                         myprint=myprint+myoldbest
8377                                                         myprint=myprint+darkgreen("to "+x[1])
8378                                                         verboseadd = None
8379                                                 else:
8380                                                         if not pkg_merge:
8381                                                                 myprint = "[%s] %s%s" % \
8382                                                                         (pkgprint(pkg_status.ljust(13)),
8383                                                                         indent, pkgprint(pkg.cp))
8384                                                         else:
8385                                                                 myprint = "[%s %s] %s%s" % \
8386                                                                         (pkgprint(pkg.type_name), addl,
8387                                                                         indent, pkgprint(pkg.cp))
8388                                                         if (newlp-nc_len(myprint)) > 0:
8389                                                                 myprint=myprint+(" "*(newlp-nc_len(myprint)))
8390                                                         myprint=myprint+"["+darkblue(xs[1]+xs[2])+"] "
8391                                                         if (oldlp-nc_len(myprint)) > 0:
8392                                                                 myprint=myprint+" "*(oldlp-nc_len(myprint))
8393                                                         myprint=myprint+myoldbest
8394                                                         myprint += darkgreen("to " + pkg.root)
8395                                         else:
8396                                                 if not pkg_merge:
8397                                                         myprint = "[%s] " % pkgprint(pkg_status.ljust(13))
8398                                                 else:
8399                                                         myprint = "[%s %s] " % (pkgprint(pkg_type), addl)
8400                                                 myprint += indent + pkgprint(pkg_key) + " " + \
8401                                                         myoldbest + darkgreen("to " + myroot)
8402                                 else:
8403                                         if "--columns" in self.myopts:
8404                                                 if "--quiet" in self.myopts:
8405                                                         myprint=addl+" "+indent+pkgprint(pkg_cp)
8406                                                         myprint=myprint+" "+green(xs[1]+xs[2])+" "
8407                                                         myprint=myprint+myoldbest
8408                                                         verboseadd = None
8409                                                 else:
8410                                                         if not pkg_merge:
8411                                                                 myprint = "[%s] %s%s" % \
8412                                                                         (pkgprint(pkg_status.ljust(13)),
8413                                                                         indent, pkgprint(pkg.cp))
8414                                                         else:
8415                                                                 myprint = "[%s %s] %s%s" % \
8416                                                                         (pkgprint(pkg.type_name), addl,
8417                                                                         indent, pkgprint(pkg.cp))
8418                                                         if (newlp-nc_len(myprint)) > 0:
8419                                                                 myprint=myprint+(" "*(newlp-nc_len(myprint)))
8420                                                         myprint=myprint+green(" ["+xs[1]+xs[2]+"] ")
8421                                                         if (oldlp-nc_len(myprint)) > 0:
8422                                                                 myprint=myprint+(" "*(oldlp-nc_len(myprint)))
8423                                                         myprint += myoldbest
8424                                         else:
8425                                                 if not pkg_merge:
8426                                                         myprint = "[%s] %s%s %s" % \
8427                                                                 (pkgprint(pkg_status.ljust(13)),
8428                                                                 indent, pkgprint(pkg.cpv),
8429                                                                 myoldbest)
8430                                                 else:
8431                                                         myprint = "[%s %s] %s%s %s" % \
8432                                                                 (pkgprint(pkg_type), addl, indent,
8433                                                                 pkgprint(pkg.cpv), myoldbest)
8434
8435                                 if columns and pkg.operation == "uninstall":
8436                                         continue
8437                                 p.append((myprint, verboseadd, repoadd))
8438
8439                                 if "--tree" not in self.myopts and \
8440                                         "--quiet" not in self.myopts and \
8441                                         not self._opts_no_restart.intersection(self.myopts) and \
8442                                         pkg.root == self._running_root.root and \
8443                                         portage.match_from_list(
8444                                         portage.const.PORTAGE_PACKAGE_ATOM, [pkg]) and \
8445                                         not vardb.cpv_exists(pkg.cpv):
8447                                                 if mylist_index < len(mylist) - 1:
8448                                                         p.append(colorize("WARN", "*** Portage will stop merging at this point and reload itself,"))
8449                                                         p.append(colorize("WARN", "    then resume the merge."))
8450
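                     # Emit the accumulated display lines, appending USE/size details
                     # and, where applicable, the repository marker.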
8451                 out = sys.stdout
8452                 show_repos = repoadd_set and repoadd_set != set(["0"])
8453
8454                 for x in p:
8455                         if isinstance(x, basestring):
8456                                 out.write("%s\n" % (x,))
8457                                 continue
8458
8459                         myprint, verboseadd, repoadd = x
8460
8461                         if verboseadd:
8462                                 myprint += " " + verboseadd
8463
8464                         if show_repos and repoadd:
8465                                 myprint += " " + teal("[%s]" % repoadd)
8466
8467                         out.write("%s\n" % (myprint,))
8468
8469                 for x in blockers:
8470                         print x
8471
8472                 if verbosity == 3:
8473                         print
8474                         print counters
8475                         if show_repos:
8476                                 sys.stdout.write(str(repo_display))
8477
8478                 if "--changelog" in self.myopts:
8479                         print
8480                         for revision,text in changelogs:
8481                                 print bold('*'+revision)
8482                                 sys.stdout.write(text)
8483
8484                 sys.stdout.flush()
8485                 return os.EX_OK
8486
8487         def display_problems(self):
8488                 """
8489                 Display problems with the dependency graph such as slot collisions.
8490                 This is called internally by display() to show the problems _after_
8491                 the merge list, where they are most likely to be seen, but if display()
8492                 is not going to be called then this method should be called explicitly
8493                 to ensure that the user is notified of problems with the graph.
8494
8495                 All output goes to stderr, except for unsatisfied dependencies, which
8496                 go to stdout for parsing by programs such as autounmask.
8497                 """
8498
8499                 # Note that show_masked_packages() sends its output to
8500                 # stdout, and some programs such as autounmask parse the
8501                 # output in cases when emerge bails out. However, when
8502                 # show_masked_packages() is called for installed packages
8503                 # here, the message is a warning that is more appropriate
8504                 # to send to stderr, so temporarily redirect stdout to
8505                 # stderr. TODO: Fix output code so there's a cleaner way
8506                 # to redirect everything to stderr.
8507                 sys.stdout.flush()
8508                 sys.stderr.flush()
8509                 stdout = sys.stdout
8510                 try:
8511                         sys.stdout = sys.stderr
8512                         self._display_problems()
8513                 finally:
8514                         sys.stdout = stdout
8515                         sys.stdout.flush()
8516                         sys.stderr.flush()
8517
8518                 # This goes to stdout for parsing by programs like autounmask.
8519                 for pargs, kwargs in self._unsatisfied_deps_for_display:
8520                         self._show_unsatisfied_dep(*pargs, **kwargs)
8521
8522         def _display_problems(self):
8523                 if self._circular_deps_for_display is not None:
8524                         self._show_circular_deps(
8525                                 self._circular_deps_for_display)
8526
8527                 # The user is only notified of a slot conflict if
8528                 # there are no unresolvable blocker conflicts.
8529                 if self._unsatisfied_blockers_for_display is not None:
8530                         self._show_unsatisfied_blockers(
8531                                 self._unsatisfied_blockers_for_display)
8532                 else:
8533                         self._show_slot_collision_notice()
8534
8535                 # TODO: Add generic support for "set problem" handlers so that
8536                 # the below warnings aren't special cases for world only.
8537
8538                 if self._missing_args:
8539                         world_problems = False
8540                         if "world" in self._sets:
8541                                 # Filter out indirect members of world (from nested sets)
8542                                 # since only direct members of world are desired here.
8543                                 world_set = self.roots[self.target_root].sets["world"]
8544                                 for arg, atom in self._missing_args:
8545                                         if arg.name == "world" and atom in world_set:
8546                                                 world_problems = True
8547                                                 break
8548
8549                         if world_problems:
8550                                 sys.stderr.write("\n!!! Problems have been " + \
8551                                         "detected with your world file\n")
8552                                 sys.stderr.write("!!! Please run " + \
8553                                         green("emaint --check world")+"\n\n")
8554
8555                 if self._missing_args:
8556                         sys.stderr.write("\n" + colorize("BAD", "!!!") + \
8557                                 " Ebuilds for the following packages are either all\n")
8558                         sys.stderr.write(colorize("BAD", "!!!") + \
8559                                 " masked or don't exist:\n")
8560                         sys.stderr.write(" ".join(str(atom) for arg, atom in \
8561                                 self._missing_args) + "\n")
8562
8563                 if self._pprovided_args:
8564                         arg_refs = {}
8565                         for arg, atom in self._pprovided_args:
8566                                 if isinstance(arg, SetArg):
8567                                         parent = arg.name
8568                                         arg_atom = (atom, atom)
8569                                 else:
8570                                         parent = "args"
8571                                         arg_atom = (arg.arg, atom)
8572                                 refs = arg_refs.setdefault(arg_atom, [])
8573                                 if parent not in refs:
8574                                         refs.append(parent)
8575                         msg = []
8576                         msg.append(bad("\nWARNING: "))
8577                         if len(self._pprovided_args) > 1:
8578                                 msg.append("Requested packages will not be " + \
8579                                         "merged because they are listed in\n")
8580                         else:
8581                                 msg.append("A requested package will not be " + \
8582                                         "merged because it is listed in\n")
8583                         msg.append("package.provided:\n\n")
8584                         problems_sets = set()
8585                         for (arg, atom), refs in arg_refs.iteritems():
8586                                 ref_string = ""
8587                                 if refs:
8588                                         problems_sets.update(refs)
8589                                         refs.sort()
8590                                         ref_string = ", ".join(["'%s'" % name for name in refs])
8591                                         ref_string = " pulled in by " + ref_string
8592                                 msg.append("  %s%s\n" % (colorize("INFORM", str(arg)), ref_string))
8593                         msg.append("\n")
8594                         if "world" in problems_sets:
8595                                 msg.append("This problem can be solved in one of the following ways:\n\n")
8596                                 msg.append("  A) Use emaint to clean offending packages from world (if not installed).\n")
8597                                 msg.append("  B) Uninstall offending packages (cleans them from world).\n")
8598                                 msg.append("  C) Remove offending entries from package.provided.\n\n")
8599                                 msg.append("The best course of action depends on the reason that an offending\n")
8600                                 msg.append("package.provided entry exists.\n\n")
8601                         sys.stderr.write("".join(msg))
8602
8603                 masked_packages = []
8604                 for pkg in self._masked_installed:
8605                         root_config = pkg.root_config
8606                         pkgsettings = self.pkgsettings[pkg.root]
8607                         mreasons = get_masking_status(pkg, pkgsettings, root_config)
8608                         masked_packages.append((root_config, pkgsettings,
8609                                 pkg.cpv, pkg.metadata, mreasons))
8610                 if masked_packages:
8611                         sys.stderr.write("\n" + colorize("BAD", "!!!") + \
8612                                 " The following installed packages are masked:\n")
8613                         show_masked_packages(masked_packages)
8614                         show_mask_docs()
8615                         print
8616
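             # calc_changelog() returns the ChangeLog entries that lie between the
             # installed version (current) and the version about to be merged (next).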
8617         def calc_changelog(self,ebuildpath,current,next):
8618                 if ebuildpath is None or not os.path.exists(ebuildpath):
8619                         return []
8620                 current = '-'.join(portage.catpkgsplit(current)[1:])
8621                 if current.endswith('-r0'):
8622                         current = current[:-3]
8623                 next = '-'.join(portage.catpkgsplit(next)[1:])
8624                 if next.endswith('-r0'):
8625                         next = next[:-3]
8626                 changelogpath = os.path.join(os.path.split(ebuildpath)[0],'ChangeLog')
8627                 try:
8628                         changelog = open(changelogpath).read()
8629                 except SystemExit:
8630                         raise # Re-raise so that SystemExit is not swallowed by the bare except below.
8631                 except:
8632                         return []
8633                 divisions = self.find_changelog_tags(changelog)
8634                 #print 'XX from',current,'to',next
8635                 #for div,text in divisions: print 'XX',div
8636                 # skip entries for all revisions above the one we are about to emerge
8637                 for i in range(len(divisions)):
8638                         if divisions[i][0]==next:
8639                                 divisions = divisions[i:]
8640                                 break
8641                 # find out how many entries we are going to display
8642                 for i in range(len(divisions)):
8643                         if divisions[i][0]==current:
8644                                 divisions = divisions[:i]
8645                                 break
8646                 else:
8647                         # couldn't find the current revision in the list; display nothing
8648                         return []
8649                 return divisions
8650
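             # find_changelog_tags() splits a ChangeLog into (version, text) pairs
             # based on its "* <version>" section headers.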
8651         def find_changelog_tags(self,changelog):
8652                 divs = []
8653                 release = None
8654                 while True:
8655                         match = re.search(r'^\*\ ?([-a-zA-Z0-9_.+]*)(?:\ .*)?\n',changelog,re.M)
8656                         if match is None:
8657                                 if release is not None:
8658                                         divs.append((release,changelog))
8659                                 return divs
8660                         if release is not None:
8661                                 divs.append((release,changelog[:match.start()]))
8662                         changelog = changelog[match.end():]
8663                         release = match.group(1)
8664                         if release.endswith('.ebuild'):
8665                                 release = release[:-7]
8666                         if release.endswith('-r0'):
8667                                 release = release[:-3]
8668
8669         def saveNomergeFavorites(self):
8670                 """Find atoms in favorites that are not in the mergelist and add them
8671                 to the world file if necessary."""
8672                 for x in ("--buildpkgonly", "--fetchonly", "--fetch-all-uri",
8673                         "--oneshot", "--onlydeps", "--pretend"):
8674                         if x in self.myopts:
8675                                 return
8676                 root_config = self.roots[self.target_root]
8677                 world_set = root_config.sets["world"]
8678
8679                 world_locked = False
8680                 if hasattr(world_set, "lock"):
8681                         world_set.lock()
8682                         world_locked = True
8683
8684                 if hasattr(world_set, "load"):
8685                         world_set.load() # maybe it's changed on disk
8686
8687                 args_set = self._sets["args"]
8688                 portdb = self.trees[self.target_root]["porttree"].dbapi
8689                 added_favorites = set()
8690                 for x in self._set_nodes:
8691                         pkg_type, root, pkg_key, pkg_status = x
8692                         if pkg_status != "nomerge":
8693                                 continue
8694
8695                         try:
8696                                 myfavkey = create_world_atom(x, args_set, root_config)
8697                                 if myfavkey:
8698                                         if myfavkey in added_favorites:
8699                                                 continue
8700                                         added_favorites.add(myfavkey)
8701                         except portage.exception.InvalidDependString, e:
8702                                 writemsg("\n\n!!! '%s' has invalid PROVIDE: %s\n" % \
8703                                         (pkg_key, str(e)), noiselevel=-1)
8704                                 writemsg("!!! see '%s'\n\n" % os.path.join(
8705                                         root, portage.VDB_PATH, pkg_key, "PROVIDE"), noiselevel=-1)
8706                                 del e
8707                 all_added = []
8708                 for k in self._sets:
8709                         if k in ("args", "world") or not root_config.sets[k].world_candidate:
8710                                 continue
8711                         s = SETPREFIX + k
8712                         if s in world_set:
8713                                 continue
8714                         all_added.append(SETPREFIX + k)
8715                 all_added.extend(added_favorites)
8716                 all_added.sort()
8717                 for a in all_added:
8718                         print ">>> Recording %s in \"world\" favorites file..." % \
8719                                 colorize("INFORM", str(a))
8720                 if all_added:
8721                         world_set.update(all_added)
8722
8723                 if world_locked:
8724                         world_set.unlock()
8725
8726         def loadResumeCommand(self, resume_data, skip_masked=False):
8727                 """
8728                 Add a resume command to the graph and validate it in the process.  This
8729                 will raise a PackageNotFound exception if a package is not available.
8730                 """
8731
8732                 if not isinstance(resume_data, dict):
8733                         return False
8734
8735                 mergelist = resume_data.get("mergelist")
8736                 if not isinstance(mergelist, list):
8737                         mergelist = []
8738
8739                 fakedb = self.mydbapi
8740                 trees = self.trees
8741                 serialized_tasks = []
8742                 masked_tasks = []
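                     # Rebuild Package instances from the serialized (type, root, cpv,
                     # action) entries of the resume mergelist, raising PackageNotFound
                     # for anything that is no longer available.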
8743                 for x in mergelist:
8744                         if not (isinstance(x, list) and len(x) == 4):
8745                                 continue
8746                         pkg_type, myroot, pkg_key, action = x
8747                         if pkg_type not in self.pkg_tree_map:
8748                                 continue
8749                         if action != "merge":
8750                                 continue
8751                         tree_type = self.pkg_tree_map[pkg_type]
8752                         mydb = trees[myroot][tree_type].dbapi
8753                         db_keys = list(self._trees_orig[myroot][
8754                                 tree_type].dbapi._aux_cache_keys)
8755                         try:
8756                                 metadata = izip(db_keys, mydb.aux_get(pkg_key, db_keys))
8757                         except KeyError:
8758                                 # It does not exist or it is corrupt.
8759                                 if action == "uninstall":
8760                                         continue
8761                                 raise portage.exception.PackageNotFound(pkg_key)
8762                         installed = action == "uninstall"
8763                         built = pkg_type != "ebuild"
8764                         root_config = self.roots[myroot]
8765                         pkg = Package(built=built, cpv=pkg_key,
8766                                 installed=installed, metadata=metadata,
8767                                 operation=action, root_config=root_config,
8768                                 type_name=pkg_type)
8769                         if pkg_type == "ebuild":
8770                                 pkgsettings = self.pkgsettings[myroot]
8771                                 pkgsettings.setcpv(pkg)
8772                                 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
8773                                 pkg.metadata['CHOST'] = pkgsettings.get('CHOST', '')
8774                         self._pkg_cache[pkg] = pkg
8775
8776                         root_config = self.roots[pkg.root]
8777                         if "merge" == pkg.operation and \
8778                                 not visible(root_config.settings, pkg):
8779                                 if skip_masked:
8780                                         masked_tasks.append(Dependency(root=pkg.root, parent=pkg))
8781                                 else:
8782                                         self._unsatisfied_deps_for_display.append(
8783                                                 ((pkg.root, "="+pkg.cpv), {"myparent":None}))
8784
8785                         fakedb[myroot].cpv_inject(pkg)
8786                         serialized_tasks.append(pkg)
8787                         self.spinner.update()
8788
8789                 if self._unsatisfied_deps_for_display:
8790                         return False
8791
8792                 if not serialized_tasks or "--nodeps" in self.myopts:
8793                         self._serialized_tasks_cache = serialized_tasks
8794                         self._scheduler_graph = self.digraph
8795                 else:
8796                         self._select_package = self._select_pkg_from_graph
8797                         self.myparams.add("selective")
8798                         # Always traverse deep dependencies in order to account for
8799                         # potentially unsatisfied dependencies of installed packages.
8800                         # This is necessary for correct --keep-going or --resume operation
8801                         # in case a package from a group of circularly dependent packages
8802                         # fails. In this case, a package which has recently been installed
8803                         # may have an unsatisfied circular dependency (pulled in by
8804                         # PDEPEND, for example). So, even though a package is already
8805                         # installed, it may not have all of its dependencies satisfied, so
8806                         # it may not be usable. If such a package is in the subgraph of
8807                         # deep dependencies of a scheduled build, that build needs to
8808                         # be cancelled. In order for this type of situation to be
8809                         # recognized, deep traversal of dependencies is required.
8810                         self.myparams.add("deep")
8811
8812                         favorites = resume_data.get("favorites")
8813                         args_set = self._sets["args"]
8814                         if isinstance(favorites, list):
8815                                 args = self._load_favorites(favorites)
8816                         else:
8817                                 args = []
8818
8819                         for task in serialized_tasks:
8820                                 if isinstance(task, Package) and \
8821                                         task.operation == "merge":
8822                                         if not self._add_pkg(task, None):
8823                                                 return False
8824
8825                         # Packages for argument atoms need to be explicitly
8826                         # added via _add_pkg() so that they are included in the
8827                         # digraph (needed at least for --tree display).
8828                         for arg in args:
8829                                 for atom in arg.set:
8830                                         pkg, existing_node = self._select_package(
8831                                                 arg.root_config.root, atom)
8832                                         if existing_node is None and \
8833                                                 pkg is not None:
8834                                                 if not self._add_pkg(pkg, Dependency(atom=atom,
8835                                                         root=pkg.root, parent=arg)):
8836                                                         return False
8837
8838                         # Allow unsatisfied deps here to avoid showing a masking
8839                         # message for an unsatisfied dep that isn't necessarily
8840                         # masked.
8841                         if not self._create_graph(allow_unsatisfied=True):
8842                                 return False
8843
8844                         unsatisfied_deps = []
8845                         for dep in self._unsatisfied_deps:
8846                                 if not isinstance(dep.parent, Package):
8847                                         continue
8848                                 if dep.parent.operation == "merge":
8849                                         unsatisfied_deps.append(dep)
8850                                         continue
8851
8852                                 # For unsatisfied deps of installed packages, only account for
8853                                 # them if they are in the subgraph of dependencies of a package
8854                                 # which is scheduled to be installed.
8855                                 unsatisfied_install = False
8856                                 traversed = set()
8857                                 dep_stack = self.digraph.parent_nodes(dep.parent)
8858                                 while dep_stack:
8859                                         node = dep_stack.pop()
8860                                         if not isinstance(node, Package):
8861                                                 continue
8862                                         if node.operation == "merge":
8863                                                 unsatisfied_install = True
8864                                                 break
8865                                         if node in traversed:
8866                                                 continue
8867                                         traversed.add(node)
8868                                         dep_stack.extend(self.digraph.parent_nodes(node))
8869
8870                                 if unsatisfied_install:
8871                                         unsatisfied_deps.append(dep)
8872
8873                         if masked_tasks or unsatisfied_deps:
8874                                 # This probably means that a required package
8875                                 # was dropped via --skipfirst. It makes the
8876                                 # resume list invalid, so convert it to an
8877                                 # UnsatisfiedResumeDep exception.
8878                                 raise self.UnsatisfiedResumeDep(self,
8879                                         masked_tasks + unsatisfied_deps)
8880                         self._serialized_tasks_cache = None
8881                         try:
8882                                 self.altlist()
8883                         except self._unknown_internal_error:
8884                                 return False
8885
8886                 return True
8887
8888         def _load_favorites(self, favorites):
8889                 """
8890                 Use a list of favorites to resume state from a
8891                 previous select_files() call. This creates similar
8892                 DependencyArg instances to those that would have
8893                 been created by the original select_files() call.
8894                 This allows Package instances to be matched with
8895                 DependencyArg instances during graph creation.
8896                 """
8897                 root_config = self.roots[self.target_root]
8898                 getSetAtoms = root_config.setconfig.getSetAtoms
8899                 sets = root_config.sets
8900                 args = []
8901                 for x in favorites:
8902                         if not isinstance(x, basestring):
8903                                 continue
8904                         if x in ("system", "world"):
8905                                 x = SETPREFIX + x
8906                         if x.startswith(SETPREFIX):
8907                                 s = x[len(SETPREFIX):]
8908                                 if s not in sets:
8909                                         continue
8910                                 if s in self._sets:
8911                                         continue
8912                                 # Recursively expand sets so that containment tests in
8913                                 # self._get_parent_sets() properly match atoms in nested
8914                                 # sets (like if world contains system).
8915                                 expanded_set = InternalPackageSet(
8916                                         initial_atoms=getSetAtoms(s))
8917                                 self._sets[s] = expanded_set
8918                                 args.append(SetArg(arg=x, set=expanded_set,
8919                                         root_config=root_config))
8920                         else:
8921                                 if not portage.isvalidatom(x):
8922                                         continue
8923                                 args.append(AtomArg(arg=x, atom=x,
8924                                         root_config=root_config))
8925
8926                 self._set_args(args)
8927                 return args
8928
8929         class UnsatisfiedResumeDep(portage.exception.PortageException):
8930                 """
8931                 A dependency of a resume list is not installed. This
8932                 can occur when a required package is dropped from the
8933                 merge list via --skipfirst.
8934                 """
8935                 def __init__(self, depgraph, value):
8936                         portage.exception.PortageException.__init__(self, value)
8937                         self.depgraph = depgraph
8938
8939         class _internal_exception(portage.exception.PortageException):
8940                 def __init__(self, value=""):
8941                         portage.exception.PortageException.__init__(self, value)
8942
8943         class _unknown_internal_error(_internal_exception):
8944                 """
8945                 Used by the depgraph internally to terminate graph creation.
8946                 The specific reason for the failure should have been dumped
8947                 to stderr; unfortunately, the exact reason for the failure
8948                 may not be known.
8949                 """
8950
8951         class _serialize_tasks_retry(_internal_exception):
8952                 """
8953                 This is raised by the _serialize_tasks() method when it needs to
8954                 be called again for some reason. The only case that it's currently
8955                 used for is when neglected dependencies need to be added to the
8956                 graph in order to avoid making a potentially unsafe decision.
8957                 """
8958
8959         class _dep_check_composite_db(portage.dbapi):
8960                 """
8961                 A dbapi-like interface that is optimized for use in dep_check() calls.
8962                 This is built on top of the existing depgraph package selection logic.
8963                 Some packages that have been added to the graph may be masked from this
8964                 view in order to influence the atom preference selection that occurs
8965                 via dep_check().
8966                 """
8967                 def __init__(self, depgraph, root):
8968                         portage.dbapi.__init__(self)
8969                         self._depgraph = depgraph
8970                         self._root = root
8971                         self._match_cache = {}
8972                         self._cpv_pkg_map = {}
8973
8974                 def _clear_cache(self):
8975                         self._match_cache.clear()
8976                         self._cpv_pkg_map.clear()
8977
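                     # match() returns the cpv(s) that the depgraph's package selection
                     # prefers for the given atom; results are cached per atom.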
8978                 def match(self, atom):
8979                         ret = self._match_cache.get(atom)
8980                         if ret is not None:
8981                                 return ret[:]
8982                         orig_atom = atom
8983                         if "/" not in atom:
8984                                 atom = self._dep_expand(atom)
8985                         pkg, existing = self._depgraph._select_package(self._root, atom)
8986                         if not pkg:
8987                                 ret = []
8988                         else:
8989                                 # Return the highest available from select_package() as well as
8990                                 # any matching slots in the graph db.
8991                                 slots = set()
8992                                 slots.add(pkg.metadata["SLOT"])
8993                                 atom_cp = portage.dep_getkey(atom)
8994                                 if pkg.cp.startswith("virtual/"):
8995                                         # For new-style virtual lookahead that occurs inside
8996                                         # dep_check(), examine all slots. This is needed
8997                                         # so that newer slots will not unnecessarily be pulled in
8998                                         # when a satisfying lower slot is already installed. For
8999                                         # example, if virtual/jdk-1.4 is satisfied via kaffe then
9000                                         # there's no need to pull in a newer slot to satisfy a
9001                                         # virtual/jdk dependency.
9002                                         for db, pkg_type, built, installed, db_keys in \
9003                                                 self._depgraph._filtered_trees[self._root]["dbs"]:
9004                                                 for cpv in db.match(atom):
9005                                                         if portage.cpv_getkey(cpv) != pkg.cp:
9006                                                                 continue
9007                                                         slots.add(db.aux_get(cpv, ["SLOT"])[0])
9008                                 ret = []
9009                                 if self._visible(pkg):
9010                                         self._cpv_pkg_map[pkg.cpv] = pkg
9011                                         ret.append(pkg.cpv)
9012                                 slots.remove(pkg.metadata["SLOT"])
9013                                 while slots:
9014                                         slot_atom = "%s:%s" % (atom_cp, slots.pop())
9015                                         pkg, existing = self._depgraph._select_package(
9016                                                 self._root, slot_atom)
9017                                         if not pkg:
9018                                                 continue
9019                                         if not self._visible(pkg):
9020                                                 continue
9021                                         self._cpv_pkg_map[pkg.cpv] = pkg
9022                                         ret.append(pkg.cpv)
9023                                 if ret:
9024                                         self._cpv_sort_ascending(ret)
9025                         self._match_cache[orig_atom] = ret
9026                         return ret[:]
9027
9028                 def _visible(self, pkg):
9029                         if pkg.installed and "selective" not in self._depgraph.myparams:
9030                                 try:
9031                                         arg = self._depgraph._iter_atoms_for_pkg(pkg).next()
9032                                 except (StopIteration, portage.exception.InvalidDependString):
9033                                         arg = None
9034                                 if arg:
9035                                         return False
9036                         if pkg.installed:
9037                                 try:
9038                                         if not visible(
9039                                                 self._depgraph.pkgsettings[pkg.root], pkg):
9040                                                 return False
9041                                 except portage.exception.InvalidDependString:
9042                                         pass
9043                         in_graph = self._depgraph._slot_pkg_map[
9044                                 self._root].get(pkg.slot_atom)
9045                         if in_graph is None:
9046                                 # Mask choices for packages which are not the highest visible
9047                                 # version within their slot (since they usually trigger slot
9048                                 # conflicts).
9049                                 highest_visible, in_graph = self._depgraph._select_package(
9050                                         self._root, pkg.slot_atom)
9051                                 if pkg != highest_visible:
9052                                         return False
9053                         elif in_graph != pkg:
9054                                 # Mask choices for packages that would trigger a slot
9055                                 # conflict with a previously selected package.
9056                                 return False
9057                         return True
9058
9059                 def _dep_expand(self, atom):
9060                         """
9061                         This is only needed for old installed packages that may
9062                         contain atoms that are not fully qualified with a specific
9063                         category. Emulate the cpv_expand() function that's used by
9064                         dbapi.match() in cases like this. If there are multiple
9065                         matches, it's often due to a new-style virtual that has
9066                         been added, so try to filter those out to avoid raising
9067                         a ValueError.
9068                         """
9069                         root_config = self._depgraph.roots[self._root]
9070                         orig_atom = atom
9071                         expanded_atoms = self._depgraph._dep_expand(root_config, atom)
9072                         if len(expanded_atoms) > 1:
9073                                 non_virtual_atoms = []
9074                                 for x in expanded_atoms:
9075                                         if not portage.dep_getkey(x).startswith("virtual/"):
9076                                                 non_virtual_atoms.append(x)
9077                                 if len(non_virtual_atoms) == 1:
9078                                         expanded_atoms = non_virtual_atoms
9079                         if len(expanded_atoms) > 1:
9080                                 # compatible with portage.cpv_expand()
9081                                 raise portage.exception.AmbiguousPackageName(
9082                                         [portage.dep_getkey(x) for x in expanded_atoms])
9083                         if expanded_atoms:
9084                                 atom = expanded_atoms[0]
9085                         else:
9086                                 null_atom = insert_category_into_atom(atom, "null")
9087                                 null_cp = portage.dep_getkey(null_atom)
9088                                 cat, atom_pn = portage.catsplit(null_cp)
9089                                 virts_p = root_config.settings.get_virts_p().get(atom_pn)
9090                                 if virts_p:
9091                                         # Allow the resolver to choose which virtual.
9092                                         atom = insert_category_into_atom(atom, "virtual")
9093                                 else:
9094                                         atom = insert_category_into_atom(atom, "null")
9095                         return atom
9096
9097                 def aux_get(self, cpv, wants):
9098                         metadata = self._cpv_pkg_map[cpv].metadata
9099                         return [metadata.get(x, "") for x in wants]
9100
9101 class RepoDisplay(object):
9102         def __init__(self, roots):
9103                 self._shown_repos = {}
9104                 self._unknown_repo = False
9105                 repo_paths = set()
9106                 for root_config in roots.itervalues():
9107                         portdir = root_config.settings.get("PORTDIR")
9108                         if portdir:
9109                                 repo_paths.add(portdir)
9110                         overlays = root_config.settings.get("PORTDIR_OVERLAY")
9111                         if overlays:
9112                                 repo_paths.update(overlays.split())
9113                 repo_paths = list(repo_paths)
9114                 self._repo_paths = repo_paths
9115                 self._repo_paths_real = [ os.path.realpath(repo_path) \
9116                         for repo_path in repo_paths ]
9117
9118                 # pre-allocate index for PORTDIR so that it always has index 0.
9119                 for root_config in roots.itervalues():
9120                         portdb = root_config.trees["porttree"].dbapi
9121                         portdir = portdb.porttree_root
9122                         if portdir:
9123                                 self.repoStr(portdir)
9124
9125         def repoStr(self, repo_path_real):
9126                 real_index = -1
9127                 if repo_path_real in self._repo_paths_real:
9128                         real_index = self._repo_paths_real.index(repo_path_real)
9129                 if real_index == -1:
9130                         s = "?"
9131                         self._unknown_repo = True
9132                 else:
9133                         shown_repos = self._shown_repos
9134                         repo_paths = self._repo_paths
9135                         repo_path = repo_paths[real_index]
9136                         index = shown_repos.get(repo_path)
9137                         if index is None:
9138                                 index = len(shown_repos)
9139                                 shown_repos[repo_path] = index
9140                         s = str(index)
9141                 return s
9142
9143         def __str__(self):
9144                 output = []
9145                 shown_repos = self._shown_repos
9146                 unknown_repo = self._unknown_repo
9147                 if shown_repos or self._unknown_repo:
9148                         output.append("Portage tree and overlays:\n")
9149                 show_repo_paths = list(shown_repos)
9150                 for repo_path, repo_index in shown_repos.iteritems():
9151                         show_repo_paths[repo_index] = repo_path
9152                 if show_repo_paths:
9153                         for index, repo_path in enumerate(show_repo_paths):
9154                                 output.append(" "+teal("["+str(index)+"]")+" %s\n" % repo_path)
9155                 if unknown_repo:
9156                         output.append(" "+teal("[?]") + \
9157                                 " indicates that the source repository could not be determined\n")
9158                 return "".join(output)
9159
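# Illustrative example (with hypothetical paths) of the output produced by
# RepoDisplay.__str__() once repoStr() has been called for two known
# repositories; the "[?]" legend line is only appended when a package's
# source repository could not be mapped to a known path:
#
#     Portage tree and overlays:
#      [0] /usr/portage
#      [1] /usr/local/overlay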
9160 class PackageCounters(object):
9161
9162         def __init__(self):
9163                 self.upgrades   = 0
9164                 self.downgrades = 0
9165                 self.new        = 0
9166                 self.newslot    = 0
9167                 self.reinst     = 0
9168                 self.uninst     = 0
9169                 self.blocks     = 0
9170                 self.blocks_satisfied         = 0
9171                 self.totalsize  = 0
9172                 self.restrict_fetch           = 0
9173                 self.restrict_fetch_satisfied = 0
9174                 self.interactive              = 0
9175
9176         def __str__(self):
9177                 total_installs = self.upgrades + self.downgrades + self.newslot + self.new + self.reinst
9178                 myoutput = []
9179                 details = []
9180                 myoutput.append("Total: %s package" % total_installs)
9181                 if total_installs != 1:
9182                         myoutput.append("s")
9183                 if total_installs != 0:
9184                         myoutput.append(" (")
9185                 if self.upgrades > 0:
9186                         details.append("%s upgrade" % self.upgrades)
9187                         if self.upgrades > 1:
9188                                 details[-1] += "s"
9189                 if self.downgrades > 0:
9190                         details.append("%s downgrade" % self.downgrades)
9191                         if self.downgrades > 1:
9192                                 details[-1] += "s"
9193                 if self.new > 0:
9194                         details.append("%s new" % self.new)
9195                 if self.newslot > 0:
9196                         details.append("%s in new slot" % self.newslot)
9197                         if self.newslot > 1:
9198                                 details[-1] += "s"
9199                 if self.reinst > 0:
9200                         details.append("%s reinstall" % self.reinst)
9201                         if self.reinst > 1:
9202                                 details[-1] += "s"
9203                 if self.uninst > 0:
9204                         details.append("%s uninstall" % self.uninst)
9205                         if self.uninst > 1:
9206                                 details[-1] += "s"
9207                 if self.interactive > 0:
9208                         details.append("%s %s" % (self.interactive,
9209                                 colorize("WARN", "interactive")))
9210                 myoutput.append(", ".join(details))
9211                 if total_installs != 0:
9212                         myoutput.append(")")
9213                 myoutput.append(", Size of downloads: %s" % format_size(self.totalsize))
9214                 if self.restrict_fetch:
9215                         myoutput.append("\nFetch Restriction: %s package" % \
9216                                 self.restrict_fetch)
9217                         if self.restrict_fetch > 1:
9218                                 myoutput.append("s")
9219                 if self.restrict_fetch_satisfied < self.restrict_fetch:
9220                         myoutput.append(bad(" (%s unsatisfied)") % \
9221                                 (self.restrict_fetch - self.restrict_fetch_satisfied))
9222                 if self.blocks > 0:
9223                         myoutput.append("\nConflict: %s block" % \
9224                                 self.blocks)
9225                         if self.blocks > 1:
9226                                 myoutput.append("s")
9227                         if self.blocks_satisfied < self.blocks:
9228                                 myoutput.append(bad(" (%s unsatisfied)") % \
9229                                         (self.blocks - self.blocks_satisfied))
9230                 return "".join(myoutput)
9231
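# A minimal, hypothetical sketch (never called by emerge itself) of how the
# counters above are filled in and rendered; the values are illustrative and
# the resulting summary string depends on format_size() defined elsewhere in
# this module.
def _example_package_counters():
        counters = PackageCounters()
        counters.upgrades = 2
        counters.new = 1
        counters.reinst = 1
        counters.totalsize = 3 * 1024 * 1024
        # Produces a summary along the lines of:
        # "Total: 4 packages (2 upgrades, 1 new, 1 reinstall), Size of downloads: ..."
        return str(counters)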
9232 class PollSelectAdapter(PollConstants):
9233
9234         """
9235         Use select to emulate a poll object, for
9236         systems that don't support poll().
9237         """
9238
9239         def __init__(self):
9240                 self._registered = {}
9241                 self._select_args = [[], [], []]
9242
9243         def register(self, fd, *args):
9244                 """
9245                 Only POLLIN is currently supported!
9246                 """
9247                 if len(args) > 1:
9248                         raise TypeError(
9249                                 "register expected at most 2 arguments, got " + \
9250                                 repr(1 + len(args)))
9251
9252                 eventmask = PollConstants.POLLIN | \
9253                         PollConstants.POLLPRI | PollConstants.POLLOUT
9254                 if args:
9255                         eventmask = args[0]
9256
9257                 self._registered[fd] = eventmask
9258                 self._select_args = None
9259
9260         def unregister(self, fd):
9261                 self._select_args = None
9262                 del self._registered[fd]
9263
9264         def poll(self, *args):
9265                 if len(args) > 1:
9266                         raise TypeError(
9267                                 "poll expected at most 2 arguments, got " + \
9268                                 repr(1 + len(args)))
9269
9270                 timeout = None
9271                 if args:
9272                         timeout = args[0]
9273
9274                 select_args = self._select_args
9275                 if select_args is None:
9276                         select_args = [self._registered.keys(), [], []]
9277
9278                 if timeout is not None:
9279                         select_args = select_args[:]
9280                         # Translate poll() timeout args to select() timeout args:
9281                         #
9282                         #          | units        | value(s) for indefinite block
9283                         # ---------|--------------|------------------------------
9284                         #   poll   | milliseconds | omitted, negative, or None
9285                         # ---------|--------------|------------------------------
9286                         #   select | seconds      | omitted
9287                         # ---------|--------------|------------------------------
9288
9289                         if timeout is not None and timeout < 0:
9290                                 timeout = None
9291                         if timeout is not None:
9292                                 select_args.append(timeout / 1000)
9293
9294                                 select_args.append(timeout / 1000.0)
9295                 poll_events = []
9296                 for fd in select_events[0]:
9297                         poll_events.append((fd, PollConstants.POLLIN))
9298                 return poll_events
9299
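# A minimal sketch (illustrative only, never called by emerge) showing that
# PollSelectAdapter mirrors the select.poll() interface; the pipe and the
# POLLIN-only handling below are assumptions made for the sake of the example.
def _example_poll_select_adapter():
        pr, pw = os.pipe()
        try:
                poller = PollSelectAdapter()
                poller.register(pr, PollConstants.POLLIN)
                os.write(pw, "hello")
                # poll() accepts an optional timeout in milliseconds, just
                # like select.poll(); readable fds come back as POLLIN events.
                for fd, event in poller.poll(1000):
                        if event & PollConstants.POLLIN:
                                os.read(fd, 5)
                poller.unregister(pr)
        finally:
                os.close(pr)
                os.close(pw)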
9300 class SequentialTaskQueue(SlotObject):
9301
9302         __slots__ = ("max_jobs", "running_tasks") + \
9303                 ("_dirty", "_scheduling", "_task_queue")
9304
9305         def __init__(self, **kwargs):
9306                 SlotObject.__init__(self, **kwargs)
9307                 self._task_queue = deque()
9308                 self.running_tasks = set()
9309                 if self.max_jobs is None:
9310                         self.max_jobs = 1
9311                 self._dirty = True
9312
9313         def add(self, task):
9314                 self._task_queue.append(task)
9315                 self._dirty = True
9316
9317         def addFront(self, task):
9318                 self._task_queue.appendleft(task)
9319                 self._dirty = True
9320
9321         def schedule(self):
9322
9323                 if not self._dirty:
9324                         return False
9325
9326                 if not self:
9327                         return False
9328
9329                 if self._scheduling:
9330                         # Ignore any recursive schedule() calls triggered via
9331                         # self._task_exit().
9332                         return False
9333
9334                 self._scheduling = True
9335
9336                 task_queue = self._task_queue
9337                 running_tasks = self.running_tasks
9338                 max_jobs = self.max_jobs
9339                 state_changed = False
9340
9341                 while task_queue and \
9342                         (max_jobs is True or len(running_tasks) < max_jobs):
9343                         task = task_queue.popleft()
9344                         cancelled = getattr(task, "cancelled", None)
9345                         if not cancelled:
9346                                 running_tasks.add(task)
9347                                 task.addExitListener(self._task_exit)
9348                                 task.start()
9349                         state_changed = True
9350
9351                 self._dirty = False
9352                 self._scheduling = False
9353
9354                 return state_changed
9355
9356         def _task_exit(self, task):
9357                 """
9358                 Since we can always rely on exit listeners being called, the set of
9359                 running tasks is always pruned automatically and there is never any need
9360                 to actively prune it.
9361                 """
9362                 self.running_tasks.remove(task)
9363                 if self._task_queue:
9364                         self._dirty = True
9365
9366         def clear(self):
9367                 self._task_queue.clear()
9368                 running_tasks = self.running_tasks
9369                 while running_tasks:
9370                         task = running_tasks.pop()
9371                         task.removeExitListener(self._task_exit)
9372                         task.cancel()
9373                 self._dirty = False
9374
9375         def __nonzero__(self):
9376                 return bool(self._task_queue or self.running_tasks)
9377
9378         def __len__(self):
9379                 return len(self._task_queue) + len(self.running_tasks)
9380
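# A hypothetical sketch of the task protocol that SequentialTaskQueue expects:
# tasks expose start(), cancel(), addExitListener()/removeExitListener() and
# an optional "cancelled" attribute, and they invoke their exit listeners on
# completion. _StubTask is illustrative only; real consumers use the
# AsynchronousTask hierarchy defined elsewhere in this module.
class _StubTask(object):

        def __init__(self):
                self.cancelled = False
                self._exit_listeners = []

        def addExitListener(self, listener):
                self._exit_listeners.append(listener)

        def removeExitListener(self, listener):
                self._exit_listeners.remove(listener)

        def start(self):
                # A real task would begin asynchronous work here and notify
                # its listeners later; this stub completes immediately.
                for listener in list(self._exit_listeners):
                        listener(self)

        def cancel(self):
                self.cancelled = True

def _example_sequential_task_queue():
        queue = SequentialTaskQueue(max_jobs=1)
        queue.add(_StubTask())
        queue.add(_StubTask())
        while queue:
                queue.schedule()
        return len(queue) == 0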
9381 _can_poll_device = None
9382
9383 def can_poll_device():
9384         """
9385         Test if it's possible to use poll() on a device such as a pty. This
9386         is known to fail on Darwin.
9387         @rtype: bool
9388         @returns: True if poll() on a device succeeds, False otherwise.
9389         """
9390
9391         global _can_poll_device
9392         if _can_poll_device is not None:
9393                 return _can_poll_device
9394
9395         if not hasattr(select, "poll"):
9396                 _can_poll_device = False
9397                 return _can_poll_device
9398
9399         try:
9400                 dev_null = open('/dev/null', 'rb')
9401         except IOError:
9402                 _can_poll_device = False
9403                 return _can_poll_device
9404
9405         p = select.poll()
9406         p.register(dev_null.fileno(), PollConstants.POLLIN)
9407
9408         invalid_request = False
9409         for f, event in p.poll():
9410                 if event & PollConstants.POLLNVAL:
9411                         invalid_request = True
9412                         break
9413         dev_null.close()
9414
9415         _can_poll_device = not invalid_request
9416         return _can_poll_device
9417
9418 def create_poll_instance():
9419         """
9420         Create an instance of select.poll, or an instance of
9421         PollSelectAdapter if there is no poll() implementation or
9422         it is broken somehow.
9423         """
9424         if can_poll_device():
9425                 return select.poll()
9426         return PollSelectAdapter()
9427
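# A tiny sketch (illustrative only) of the intended entry point: callers do
# not instantiate select.poll() directly, they go through
# create_poll_instance() so that platforms with a broken device poll(), such
# as Darwin, transparently fall back to the select()-based adapter above.
def _example_poll_backend():
        poller = create_poll_instance()
        # True when the native select.poll() works on devices such as ptys.
        native = can_poll_device()
        return poller, native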
9428 getloadavg = getattr(os, "getloadavg", None)
9429 if getloadavg is None:
9430         def getloadavg():
9431                 """
9432                 Uses /proc/loadavg to emulate os.getloadavg().
9433                 Raises OSError if the load average was unobtainable.
9434                 """
9435                 try:
9436                         loadavg_str = open('/proc/loadavg').readline()
9437                 except IOError:
9438                         # getloadavg() is only supposed to raise OSError, so convert
9439                         raise OSError('unknown')
9440                 loadavg_split = loadavg_str.split()
9441                 if len(loadavg_split) < 3:
9442                         raise OSError('unknown')
9443                 loadavg_floats = []
9444                 for i in xrange(3):
9445                         try:
9446                                 loadavg_floats.append(float(loadavg_split[i]))
9447                         except ValueError:
9448                                 raise OSError('unknown')
9449                 return tuple(loadavg_floats)
9450
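# A short, hypothetical sketch of how the load average tuple above is
# consumed (compare _can_add_job() in PollScheduler below); the threshold is
# illustrative.
def _example_load_check(max_load=4.0):
        try:
                avg1, avg5, avg15 = getloadavg()
        except OSError:
                # Load average unobtainable, e.g. /proc/loadavg is missing.
                return False
        return avg1 < max_load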
9451 class PollScheduler(object):
9452
9453         class _sched_iface_class(SlotObject):
9454                 __slots__ = ("register", "schedule", "unregister")
9455
9456         def __init__(self):
9457                 self._max_jobs = 1
9458                 self._max_load = None
9459                 self._jobs = 0
9460                 self._poll_event_queue = []
9461                 self._poll_event_handlers = {}
9462                 self._poll_event_handler_ids = {}
9463                 # Increment id for each new handler.
9464                 self._event_handler_id = 0
9465                 self._poll_obj = create_poll_instance()
9466                 self._scheduling = False
9467
9468         def _schedule(self):
9469                 """
9470                 Calls _schedule_tasks() and automatically returns early from
9471                 any recursive calls to this method that the _schedule_tasks()
9472                 call might trigger. This makes _schedule() safe to call from
9473                 inside exit listeners.
9474                 """
9475                 if self._scheduling:
9476                         return False
9477                 self._scheduling = True
9478                 try:
9479                         return self._schedule_tasks()
9480                 finally:
9481                         self._scheduling = False
9482
9483         def _running_job_count(self):
9484                 return self._jobs
9485
9486         def _can_add_job(self):
9487                 max_jobs = self._max_jobs
9488                 max_load = self._max_load
9489
9490                 if self._max_jobs is not True and \
9491                         self._running_job_count() >= self._max_jobs:
9492                         return False
9493
9494                 if max_load is not None and \
9495                         (max_jobs is True or max_jobs > 1) and \
9496                         self._running_job_count() >= 1:
9497                         try:
9498                                 avg1, avg5, avg15 = getloadavg()
9499                         except OSError:
9500                                 return False
9501
9502                         if avg1 >= max_load:
9503                                 return False
9504
9505                 return True
9506
9507         def _poll(self, timeout=None):
9508                 """
9509                 All poll() calls pass through here. The poll events
9510                 are added directly to self._poll_event_queue.
9511                 In order to avoid endless blocking, this raises
9512                 StopIteration if timeout is None and there are
9513                 no file descriptors to poll.
9514                 """
9515                 if not self._poll_event_handlers:
9516                         self._schedule()
9517                         if timeout is None and \
9518                                 not self._poll_event_handlers:
9519                                 raise StopIteration(
9520                                         "timeout is None and there are no poll() event handlers")
9521
9522                 # The following error is known to occur with Linux kernel versions
9523                 # less than 2.6.24:
9524                 #
9525                 #   select.error: (4, 'Interrupted system call')
9526                 #
9527                 # This error has been observed after a SIGSTOP, followed by SIGCONT.
9528                 # Treat it like EAGAIN if timeout is None; otherwise just return
9529                 # without any events.
9530                 while True:
9531                         try:
9532                                 self._poll_event_queue.extend(self._poll_obj.poll(timeout))
9533                                 break
9534                         except select.error, e:
9535                                 writemsg_level("\n!!! select error: %s\n" % (e,),
9536                                         level=logging.ERROR, noiselevel=-1)
9537                                 del e
9538                                 if timeout is not None:
9539                                         break
9540
9541         def _next_poll_event(self, timeout=None):
9542                 """
9543                 Since the _schedule_wait() loop is called by event
9544                 handlers from _poll_loop(), maintain a central event
9545                 queue for both of them to share events from a single
9546                 poll() call. In order to avoid endless blocking, this
9547                 raises StopIteration if timeout is None and there are
9548                 no file descriptors to poll.
9549                 """
9550                 if not self._poll_event_queue:
9551                         self._poll(timeout)
9552                 return self._poll_event_queue.pop()
9553
9554         def _poll_loop(self):
9555
9556                 event_handlers = self._poll_event_handlers
9557                 event_handled = False
9558
9559                 try:
9560                         while event_handlers:
9561                                 f, event = self._next_poll_event()
9562                                 handler, reg_id = event_handlers[f]
9563                                 handler(f, event)
9564                                 event_handled = True
9565                 except StopIteration:
9566                         event_handled = True
9567
9568                 if not event_handled:
9569                         raise AssertionError("tight loop")
9570
9571         def _schedule_yield(self):
9572                 """
9573                 Schedule for a short period of time chosen by the scheduler based
9574                 on internal state. Synchronous tasks should call this periodically
9575                 in order to allow the scheduler to service pending poll events. The
9576                 scheduler will call poll() exactly once, without blocking, and any
9577                 resulting poll events will be serviced.
9578                 """
9579                 event_handlers = self._poll_event_handlers
9580                 events_handled = 0
9581
9582                 if not event_handlers:
9583                         return bool(events_handled)
9584
9585                 if not self._poll_event_queue:
9586                         self._poll(0)
9587
9588                 try:
9589                         while event_handlers and self._poll_event_queue:
9590                                 f, event = self._next_poll_event()
9591                                 handler, reg_id = event_handlers[f]
9592                                 handler(f, event)
9593                                 events_handled += 1
9594                 except StopIteration:
9595                         events_handled += 1
9596
9597                 return bool(events_handled)
9598
9599         def _register(self, f, eventmask, handler):
9600                 """
9601                 @rtype: int
9602                 @return: A unique registration id, for use in schedule() or
9603                         unregister() calls.
9604                 """
9605                 if f in self._poll_event_handlers:
9606                         raise AssertionError("fd %d is already registered" % f)
9607                 self._event_handler_id += 1
9608                 reg_id = self._event_handler_id
9609                 self._poll_event_handler_ids[reg_id] = f
9610                 self._poll_event_handlers[f] = (handler, reg_id)
9611                 self._poll_obj.register(f, eventmask)
9612                 return reg_id
9613
9614         def _unregister(self, reg_id):
9615                 f = self._poll_event_handler_ids[reg_id]
9616                 self._poll_obj.unregister(f)
9617                 del self._poll_event_handlers[f]
9618                 del self._poll_event_handler_ids[reg_id]
9619
9620         def _schedule_wait(self, wait_ids):
9621                 """
9622                 Schedule until the given wait_ids are no longer registered
9623                 for poll() events.
9624                 @type wait_ids: int or collection of ints
9625                 @param wait_ids: a registration id (or ids) to wait for
9626                 """
9627                 event_handlers = self._poll_event_handlers
9628                 handler_ids = self._poll_event_handler_ids
9629                 event_handled = False
9630
9631                 if isinstance(wait_ids, int):
9632                         wait_ids = frozenset([wait_ids])
9633
9634                 try:
9635                         while wait_ids.intersection(handler_ids):
9636                                 f, event = self._next_poll_event()
9637                                 handler, reg_id = event_handlers[f]
9638                                 handler(f, event)
9639                                 event_handled = True
9640                 except StopIteration:
9641                         event_handled = True
9642
9643                 return event_handled
9644
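# A hypothetical sketch of the registration protocol that PollScheduler
# exposes to tasks via _sched_iface_class (see e.g. QueueScheduler.sched_iface
# below): register a file descriptor with a handler, let schedule() service
# poll events until the registration goes away, then clean up. The pipe and
# handler are illustrative only.
def _example_sched_iface_usage(sched_iface):
        pr, pw = os.pipe()
        results = []
        reg_ids = []

        def handler(fd, event):
                if event & PollConstants.POLLIN:
                        results.append(os.read(fd, 1))
                # Real tasks unregister once their stream is exhausted, which
                # also terminates the schedule() wait below.
                sched_iface.unregister(reg_ids[0])

        reg_ids.append(sched_iface.register(pr, PollConstants.POLLIN, handler))
        os.write(pw, "x")
        sched_iface.schedule(reg_ids[0])
        os.close(pr)
        os.close(pw)
        return results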
9645 class QueueScheduler(PollScheduler):
9646
9647         """
9648         Add instances of SequentialTaskQueue and then call run(). The
9649         run() method returns when no tasks remain.
9650         """
9651
9652         def __init__(self, max_jobs=None, max_load=None):
9653                 PollScheduler.__init__(self)
9654
9655                 if max_jobs is None:
9656                         max_jobs = 1
9657
9658                 self._max_jobs = max_jobs
9659                 self._max_load = max_load
9660                 self.sched_iface = self._sched_iface_class(
9661                         register=self._register,
9662                         schedule=self._schedule_wait,
9663                         unregister=self._unregister)
9664
9665                 self._queues = []
9666                 self._schedule_listeners = []
9667
9668         def add(self, q):
9669                 self._queues.append(q)
9670
9671         def remove(self, q):
9672                 self._queues.remove(q)
9673
9674         def run(self):
9675
9676                 while self._schedule():
9677                         self._poll_loop()
9678
9679                 while self._running_job_count():
9680                         self._poll_loop()
9681
9682         def _schedule_tasks(self):
9683                 """
9684                 @rtype: bool
9685                 @returns: True if there may be remaining tasks to schedule,
9686                         False otherwise.
9687                 """
9688                 while self._can_add_job():
9689                         n = self._max_jobs - self._running_job_count()
9690                         if n < 1:
9691                                 break
9692
9693                         if not self._start_next_job(n):
9694                                 return False
9695
9696                 for q in self._queues:
9697                         if q:
9698                                 return True
9699                 return False
9700
9701         def _running_job_count(self):
9702                 job_count = 0
9703                 for q in self._queues:
9704                         job_count += len(q.running_tasks)
9705                 self._jobs = job_count
9706                 return job_count
9707
9708         def _start_next_job(self, n=1):
9709                 started_count = 0
9710                 for q in self._queues:
9711                         initial_job_count = len(q.running_tasks)
9712                         q.schedule()
9713                         final_job_count = len(q.running_tasks)
9714                         if final_job_count > initial_job_count:
9715                                 started_count += (final_job_count - initial_job_count)
9716                         if started_count >= n:
9717                                 break
9718                 return started_count
9719
9720 class TaskScheduler(object):
9721
9722         """
9723         A simple way to handle scheduling of AsynchronousTask instances. Add
9724         tasks and call run(); the run() method returns when no tasks remain.
9725         """
9726
9727         def __init__(self, max_jobs=None, max_load=None):
9728                 self._queue = SequentialTaskQueue(max_jobs=max_jobs)
9729                 self._scheduler = QueueScheduler(
9730                         max_jobs=max_jobs, max_load=max_load)
9731                 self.sched_iface = self._scheduler.sched_iface
9732                 self.run = self._scheduler.run
9733                 self._scheduler.add(self._queue)
9734
9735         def add(self, task):
9736                 self._queue.add(task)
9737
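# A compact, hypothetical example tying the pieces together: TaskScheduler
# wraps a SequentialTaskQueue in a QueueScheduler, so callers only add tasks
# and call run(). _StubTask refers to the illustrative stub sketched after
# SequentialTaskQueue above.
def _example_task_scheduler():
        task_scheduler = TaskScheduler(max_jobs=2)
        task_scheduler.add(_StubTask())
        task_scheduler.add(_StubTask())
        # run() returns once every queued task has started and exited.
        task_scheduler.run()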
9738 class JobStatusDisplay(object):
9739
9740         _bound_properties = ("curval", "failed", "running")
9741         _jobs_column_width = 48
9742
9743         # Don't update the display unless at least this much
9744         # time has passed, in units of seconds.
9745         _min_display_latency = 2
9746
9747         _default_term_codes = {
9748                 'cr'  : '\r',
9749                 'el'  : '\x1b[K',
9750                 'nel' : '\n',
9751         }
9752
9753         _termcap_name_map = {
9754                 'carriage_return' : 'cr',
9755                 'clr_eol'         : 'el',
9756                 'newline'         : 'nel',
9757         }
9758
9759         def __init__(self, out=sys.stdout, quiet=False):
9760                 object.__setattr__(self, "out", out)
9761                 object.__setattr__(self, "quiet", quiet)
9762                 object.__setattr__(self, "maxval", 0)
9763                 object.__setattr__(self, "merges", 0)
9764                 object.__setattr__(self, "_changed", False)
9765                 object.__setattr__(self, "_displayed", False)
9766                 object.__setattr__(self, "_last_display_time", 0)
9767                 object.__setattr__(self, "width", 80)
9768                 self.reset()
9769
9770                 isatty = hasattr(out, "isatty") and out.isatty()
9771                 object.__setattr__(self, "_isatty", isatty)
9772                 if not isatty or not self._init_term():
9773                         term_codes = {}
9774                         for k, capname in self._termcap_name_map.iteritems():
9775                                 term_codes[k] = self._default_term_codes[capname]
9776                         object.__setattr__(self, "_term_codes", term_codes)
9777                 encoding = sys.getdefaultencoding()
9778                 for k, v in self._term_codes.items():
9779                         if not isinstance(v, basestring):
9780                                 self._term_codes[k] = v.decode(encoding, 'replace')
9781
9782         def _init_term(self):
9783                 """
9784                 Initialize term control codes.
9785                 @rtype: bool
9786                 @returns: True if term codes were successfully initialized,
9787                         False otherwise.
9788                 """
9789
9790                 term_type = os.environ.get("TERM", "vt100")
9791                 tigetstr = None
9792
9793                 try:
9794                         import curses
9795                         try:
9796                                 curses.setupterm(term_type, self.out.fileno())
9797                                 tigetstr = curses.tigetstr
9798                         except curses.error:
9799                                 pass
9800                 except ImportError:
9801                         pass
9802
9803                 if tigetstr is None:
9804                         return False
9805
9806                 term_codes = {}
9807                 for k, capname in self._termcap_name_map.iteritems():
9808                         code = tigetstr(capname)
9809                         if code is None:
9810                                 code = self._default_term_codes[capname]
9811                         term_codes[k] = code
9812                 object.__setattr__(self, "_term_codes", term_codes)
9813                 return True
9814
9815         def _format_msg(self, msg):
9816                 return ">>> %s" % msg
9817
9818         def _erase(self):
9819                 self.out.write(
9820                         self._term_codes['carriage_return'] + \
9821                         self._term_codes['clr_eol'])
9822                 self.out.flush()
9823                 self._displayed = False
9824
9825         def _display(self, line):
9826                 self.out.write(line)
9827                 self.out.flush()
9828                 self._displayed = True
9829
9830         def _update(self, msg):
9831
9832                 out = self.out
9833                 if not self._isatty:
9834                         out.write(self._format_msg(msg) + self._term_codes['newline'])
9835                         self.out.flush()
9836                         self._displayed = True
9837                         return
9838
9839                 if self._displayed:
9840                         self._erase()
9841
9842                 self._display(self._format_msg(msg))
9843
9844         def displayMessage(self, msg):
9845
9846                 was_displayed = self._displayed
9847
9848                 if self._isatty and self._displayed:
9849                         self._erase()
9850
9851                 self.out.write(self._format_msg(msg) + self._term_codes['newline'])
9852                 self.out.flush()
9853                 self._displayed = False
9854
9855                 if was_displayed:
9856                         self._changed = True
9857                         self.display()
9858
9859         def reset(self):
9860                 self.maxval = 0
9861                 self.merges = 0
9862                 for name in self._bound_properties:
9863                         object.__setattr__(self, name, 0)
9864
9865                 if self._displayed:
9866                         self.out.write(self._term_codes['newline'])
9867                         self.out.flush()
9868                         self._displayed = False
9869
9870         def __setattr__(self, name, value):
9871                 old_value = getattr(self, name)
9872                 if value == old_value:
9873                         return
9874                 object.__setattr__(self, name, value)
9875                 if name in self._bound_properties:
9876                         self._property_change(name, old_value, value)
9877
9878         def _property_change(self, name, old_value, new_value):
9879                 self._changed = True
9880                 self.display()
9881
9882         def _load_avg_str(self):
9883                 try:
9884                         avg = getloadavg()
9885                 except OSError:
9886                         return 'unknown'
9887
9888                 max_avg = max(avg)
9889
9890                 if max_avg < 10:
9891                         digits = 2
9892                 elif max_avg < 100:
9893                         digits = 1
9894                 else:
9895                         digits = 0
9896
9897                 return ", ".join(("%%.%df" % digits ) % x for x in avg)
9898
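        # For example, _load_avg_str() above renders (0.42, 0.51, 0.60) as
        # "0.42, 0.51, 0.60", while (105.0, 80.0, 60.0) collapses to
        # "105, 80, 60" so that large values still fit in the status line.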
9899         def display(self):
9900                 """
9901                 Display status on stdout, but only if something has
9902                 changed since the last call.
9903                 """
9904
9905                 if self.quiet:
9906                         return
9907
9908                 current_time = time.time()
9909                 time_delta = current_time - self._last_display_time
9910                 if self._displayed and \
9911                         not self._changed:
9912                         if not self._isatty:
9913                                 return
9914                         if time_delta < self._min_display_latency:
9915                                 return
9916
9917                 self._last_display_time = current_time
9918                 self._changed = False
9919                 self._display_status()
9920
9921         def _display_status(self):
9922                 # Don't use len(self._completed_tasks) here since that also
9923                 # can include uninstall tasks.
9924                 curval_str = str(self.curval)
9925                 maxval_str = str(self.maxval)
9926                 running_str = str(self.running)
9927                 failed_str = str(self.failed)
9928                 load_avg_str = self._load_avg_str()
9929
9930                 color_output = StringIO()
9931                 plain_output = StringIO()
9932                 style_file = portage.output.ConsoleStyleFile(color_output)
9933                 style_file.write_listener = plain_output
9934                 style_writer = portage.output.StyleWriter(file=style_file, maxcol=9999)
9935                 style_writer.style_listener = style_file.new_styles
9936                 f = formatter.AbstractFormatter(style_writer)
9937
9938                 number_style = "INFORM"
9939                 f.add_literal_data("Jobs: ")
9940                 f.push_style(number_style)
9941                 f.add_literal_data(curval_str)
9942                 f.pop_style()
9943                 f.add_literal_data(" of ")
9944                 f.push_style(number_style)
9945                 f.add_literal_data(maxval_str)
9946                 f.pop_style()
9947                 f.add_literal_data(" complete")
9948
9949                 if self.running:
9950                         f.add_literal_data(", ")
9951                         f.push_style(number_style)
9952                         f.add_literal_data(running_str)
9953                         f.pop_style()
9954                         f.add_literal_data(" running")
9955
9956                 if self.failed:
9957                         f.add_literal_data(", ")
9958                         f.push_style(number_style)
9959                         f.add_literal_data(failed_str)
9960                         f.pop_style()
9961                         f.add_literal_data(" failed")
9962
9963                 padding = self._jobs_column_width - len(plain_output.getvalue())
9964                 if padding > 0:
9965                         f.add_literal_data(padding * " ")
9966
9967                 f.add_literal_data("Load avg: ")
9968                 f.add_literal_data(load_avg_str)
9969
9970                 # Truncate to fit width, to avoid making the terminal scroll if the
9971                 # line overflows (happens when the load average is large).
9972                 plain_output = plain_output.getvalue()
9973                 if self._isatty and len(plain_output) > self.width:
9974                         # Use plain_output here since it's easier to truncate
9975                         # properly than the color output which contains console
9976                         # color codes.
9977                         self._update(plain_output[:self.width])
9978                 else:
9979                         self._update(color_output.getvalue())
9980
9981                 xtermTitle(" ".join(plain_output.split()))
9982
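# A minimal, hypothetical sketch of how the Scheduler below drives
# JobStatusDisplay: assignments to the bound properties (curval, failed,
# running) trigger a redraw automatically, while one-off messages go through
# displayMessage(). The package name is illustrative.
def _example_job_status_display():
        display = JobStatusDisplay(out=sys.stdout, quiet=True)
        display.maxval = 3
        display.curval = 1      # bound property: marks the display changed
        display.running = 2
        display.displayMessage("completed app-misc/foo-1.0 (hypothetical)")
        display.reset()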
9983 class Scheduler(PollScheduler):
9984
9985         _opts_ignore_blockers = \
9986                 frozenset(["--buildpkgonly",
9987                 "--fetchonly", "--fetch-all-uri",
9988                 "--nodeps", "--pretend"])
9989
9990         _opts_no_background = \
9991                 frozenset(["--pretend",
9992                 "--fetchonly", "--fetch-all-uri"])
9993
9994         _opts_no_restart = frozenset(["--buildpkgonly",
9995                 "--fetchonly", "--fetch-all-uri", "--pretend"])
9996
9997         _bad_resume_opts = set(["--ask", "--changelog",
9998                 "--resume", "--skipfirst"])
9999
10000         _fetch_log = "/var/log/emerge-fetch.log"
10001
10002         class _iface_class(SlotObject):
10003                 __slots__ = ("dblinkEbuildPhase", "dblinkDisplayMerge",
10004                         "dblinkElog", "dblinkEmergeLog", "fetch", "register", "schedule",
10005                         "scheduleSetup", "scheduleUnpack", "scheduleYield",
10006                         "unregister")
10007
10008         class _fetch_iface_class(SlotObject):
10009                 __slots__ = ("log_file", "schedule")
10010
10011         _task_queues_class = slot_dict_class(
10012                 ("merge", "jobs", "fetch", "unpack"), prefix="")
10013
10014         class _build_opts_class(SlotObject):
10015                 __slots__ = ("buildpkg", "buildpkgonly",
10016                         "fetch_all_uri", "fetchonly", "pretend")
10017
10018         class _binpkg_opts_class(SlotObject):
10019                 __slots__ = ("fetchonly", "getbinpkg", "pretend")
10020
10021         class _pkg_count_class(SlotObject):
10022                 __slots__ = ("curval", "maxval")
10023
10024         class _emerge_log_class(SlotObject):
10025                 __slots__ = ("xterm_titles",)
10026
10027                 def log(self, *pargs, **kwargs):
10028                         if not self.xterm_titles:
10029                                 # Avoid interference with the scheduler's status display.
10030                                 kwargs.pop("short_msg", None)
10031                         emergelog(self.xterm_titles, *pargs, **kwargs)
10032
10033         class _failed_pkg(SlotObject):
10034                 __slots__ = ("build_dir", "build_log", "pkg", "returncode")
10035
10036         class _ConfigPool(object):
10037                 """Interface for a task to temporarily allocate a config
10038                 instance from a pool. This allows a task to be constructed
10039                 long before the config instance actually becomes needed, like
10040                 when prefetchers are constructed for the whole merge list."""
10041                 __slots__ = ("_root", "_allocate", "_deallocate")
10042                 def __init__(self, root, allocate, deallocate):
10043                         self._root = root
10044                         self._allocate = allocate
10045                         self._deallocate = deallocate
10046                 def allocate(self):
10047                         return self._allocate(self._root)
10048                 def deallocate(self, settings):
10049                         self._deallocate(settings)
10050
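        # Hypothetical usage sketch for _ConfigPool; the callback names are
        # illustrative stand-ins for the Scheduler's own config-pool helpers
        # (compare self._config_pool in __init__ below):
        #
        #     pool = Scheduler._ConfigPool(root, allocate_cb, deallocate_cb)
        #     settings = pool.allocate()
        #     try:
        #             ... # run an ebuild phase with a private config instance
        #     finally:
        #             pool.deallocate(settings)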
10051         class _unknown_internal_error(portage.exception.PortageException):
10052                 """
10053                 Used internally to terminate scheduling. The specific reason for
10054                 the failure should have been dumped to stderr.
10055                 """
10056                 def __init__(self, value=""):
10057                         portage.exception.PortageException.__init__(self, value)
10058
10059         def __init__(self, settings, trees, mtimedb, myopts,
10060                 spinner, mergelist, favorites, digraph):
10061                 PollScheduler.__init__(self)
10062                 self.settings = settings
10063                 self.target_root = settings["ROOT"]
10064                 self.trees = trees
10065                 self.myopts = myopts
10066                 self._spinner = spinner
10067                 self._mtimedb = mtimedb
10068                 self._mergelist = mergelist
10069                 self._favorites = favorites
10070                 self._args_set = InternalPackageSet(favorites)
10071                 self._build_opts = self._build_opts_class()
10072                 for k in self._build_opts.__slots__:
10073                         setattr(self._build_opts, k, "--" + k.replace("_", "-") in myopts)
10074                 self._binpkg_opts = self._binpkg_opts_class()
10075                 for k in self._binpkg_opts.__slots__:
10076                         setattr(self._binpkg_opts, k, "--" + k.replace("_", "-") in myopts)
10077
10078                 self.curval = 0
10079                 self._logger = self._emerge_log_class()
10080                 self._task_queues = self._task_queues_class()
10081                 for k in self._task_queues.allowed_keys:
10082                         setattr(self._task_queues, k,
10083                                 SequentialTaskQueue())
10084
10085                 # Holds merges that wait to be executed until no builds are
10086                 # running. This is useful for system packages since dependencies
10087                 # on system packages are frequently unspecified.
10088                 self._merge_wait_queue = []
10089                 # Holds merges that have been transferred from the merge_wait_queue to
10090                 # the actual merge queue. They are removed from this list upon
10091                 # completion. Other packages can start building only when this list is
10092                 # empty.
10093                 self._merge_wait_scheduled = []
10094
10095                 # Holds system packages and their deep runtime dependencies. Before
10096                 # being merged, these packages go to merge_wait_queue, to be merged
10097                 # when no other packages are building.
10098                 self._deep_system_deps = set()
10099
10100                 # Holds packages to merge which will satisfy currently unsatisfied
10101                 # deep runtime dependencies of system packages. If this is not empty
10102                 # then no parallel builds will be spawned until it is empty. This
10103                 # minimizes the possibility that a build will fail due to the system
10104                 # being in a fragile state. For example, see bug #259954.
10105                 self._unsatisfied_system_deps = set()
10106
10107                 self._status_display = JobStatusDisplay()
10108                 self._max_load = myopts.get("--load-average")
10109                 max_jobs = myopts.get("--jobs")
10110                 if max_jobs is None:
10111                         max_jobs = 1
10112                 self._set_max_jobs(max_jobs)
10113
10114                 # The root where the currently running
10115                 # portage instance is installed.
10116                 self._running_root = trees["/"]["root_config"]
10117                 self.edebug = 0
10118                 if settings.get("PORTAGE_DEBUG", "") == "1":
10119                         self.edebug = 1
10120                 self.pkgsettings = {}
10121                 self._config_pool = {}
10122                 self._blocker_db = {}
10123                 for root in trees:
10124                         self._config_pool[root] = []
10125                         self._blocker_db[root] = BlockerDB(trees[root]["root_config"])
10126
10127                 fetch_iface = self._fetch_iface_class(log_file=self._fetch_log,
10128                         schedule=self._schedule_fetch)
10129                 self._sched_iface = self._iface_class(
10130                         dblinkEbuildPhase=self._dblink_ebuild_phase,
10131                         dblinkDisplayMerge=self._dblink_display_merge,
10132                         dblinkElog=self._dblink_elog,
10133                         dblinkEmergeLog=self._dblink_emerge_log,
10134                         fetch=fetch_iface, register=self._register,
10135                         schedule=self._schedule_wait,
10136                         scheduleSetup=self._schedule_setup,
10137                         scheduleUnpack=self._schedule_unpack,
10138                         scheduleYield=self._schedule_yield,
10139                         unregister=self._unregister)
10140
10141                 self._prefetchers = weakref.WeakValueDictionary()
10142                 self._pkg_queue = []
10143                 self._completed_tasks = set()
10144
10145                 self._failed_pkgs = []
10146                 self._failed_pkgs_all = []
10147                 self._failed_pkgs_die_msgs = []
10148                 self._post_mod_echo_msgs = []
10149                 self._parallel_fetch = False
10150                 merge_count = len([x for x in mergelist \
10151                         if isinstance(x, Package) and x.operation == "merge"])
10152                 self._pkg_count = self._pkg_count_class(
10153                         curval=0, maxval=merge_count)
10154                 self._status_display.maxval = self._pkg_count.maxval
10155
10156                 # The load average takes some time to respond when new
10157                 # jobs are added, so we need to limit the rate of adding
10158                 # new jobs.
10159                 self._job_delay_max = 10
10160                 self._job_delay_factor = 1.0
10161                 self._job_delay_exp = 1.5
10162                 self._previous_job_start_time = None
10163
10164                 self._set_digraph(digraph)
10165
10166                 # This is used to memoize the _choose_pkg() result when
10167                 # no packages can be chosen until one of the existing
10168                 # jobs completes.
10169                 self._choose_pkg_return_early = False
10170
10171                 features = self.settings.features
10172                 if "parallel-fetch" in features and \
10173                         not ("--pretend" in self.myopts or \
10174                         "--fetch-all-uri" in self.myopts or \
10175                         "--fetchonly" in self.myopts):
10176                         if "distlocks" not in features:
10177                                 portage.writemsg(red("!!!")+"\n", noiselevel=-1)
10178                                 portage.writemsg(red("!!!")+" parallel-fetching " + \
10179                                         "requires the distlocks feature to be enabled"+"\n",
10180                                         noiselevel=-1)
10181                                 portage.writemsg(red("!!!")+" you have it disabled, " + \
10182                                         "thus parallel-fetching is being disabled"+"\n",
10183                                         noiselevel=-1)
10184                                 portage.writemsg(red("!!!")+"\n", noiselevel=-1)
10185                         elif len(mergelist) > 1:
10186                                 self._parallel_fetch = True
10187
10188                 if self._parallel_fetch:
10189                         # clear out existing fetch log if it exists
10190                         try:
10191                                 open(self._fetch_log, 'w')
10192                         except EnvironmentError:
10193                                 pass
10194
10195                 self._running_portage = None
10196                 portage_match = self._running_root.trees["vartree"].dbapi.match(
10197                         portage.const.PORTAGE_PACKAGE_ATOM)
10198                 if portage_match:
10199                         cpv = portage_match.pop()
10200                         self._running_portage = self._pkg(cpv, "installed",
10201                                 self._running_root, installed=True)
10202
10203         def _poll(self, timeout=None):
10204                 self._schedule()
10205                 PollScheduler._poll(self, timeout=timeout)
10206
10207         def _set_max_jobs(self, max_jobs):
10208                 self._max_jobs = max_jobs
10209                 self._task_queues.jobs.max_jobs = max_jobs
10210
10211         def _background_mode(self):
10212                 """
10213                 Check if background mode is enabled and adjust states as necessary.
10214
10215                 @rtype: bool
10216                 @returns: True if background mode is enabled, False otherwise.
10217                 """
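                      # Note: self._max_jobs may be the literal value True, meaning an
                      # unlimited number of jobs, which is why the checks below combine
                      # "is True" with the numeric comparison.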
10218                 background = (self._max_jobs is True or \
10219                         self._max_jobs > 1 or "--quiet" in self.myopts) and \
10220                         not bool(self._opts_no_background.intersection(self.myopts))
10221
10222                 if background:
10223                         interactive_tasks = self._get_interactive_tasks()
10224                         if interactive_tasks:
10225                                 background = False
10226                                 writemsg_level(">>> Sending package output to stdio due " + \
10227                                         "to interactive package(s):\n",
10228                                         level=logging.INFO, noiselevel=-1)
10229                                 msg = [""]
10230                                 for pkg in interactive_tasks:
10231                                         pkg_str = "  " + colorize("INFORM", str(pkg.cpv))
10232                                         if pkg.root != "/":
10233                                                 pkg_str += " for " + pkg.root
10234                                         msg.append(pkg_str)
10235                                 msg.append("")
10236                                 writemsg_level("".join("%s\n" % (l,) for l in msg),
10237                                         level=logging.INFO, noiselevel=-1)
10238                                 if self._max_jobs is True or self._max_jobs > 1:
10239                                         self._set_max_jobs(1)
10240                                         writemsg_level(">>> Setting --jobs=1 due " + \
10241                                                 "to the above interactive package(s)\n",
10242                                                 level=logging.INFO, noiselevel=-1)
10243
10244                 self._status_display.quiet = \
10245                         not background or \
10246                         ("--quiet" in self.myopts and \
10247                         "--verbose" not in self.myopts)
10248
10249                 self._logger.xterm_titles = \
10250                         "notitles" not in self.settings.features and \
10251                         self._status_display.quiet
10252
10253                 return background
10254
10255         def _get_interactive_tasks(self):
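                      # Collect scheduled merges whose PROPERTIES (after USE
                      # conditionals are evaluated) include "interactive", i.e.
                      # packages that may prompt the user and therefore cannot have
                      # their output redirected to a background log.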
10256                 from portage import flatten
10257                 from portage.dep import use_reduce, paren_reduce
10258                 interactive_tasks = []
10259                 for task in self._mergelist:
10260                         if not (isinstance(task, Package) and \
10261                                 task.operation == "merge"):
10262                                 continue
10263                         try:
10264                                 properties = flatten(use_reduce(paren_reduce(
10265                                         task.metadata["PROPERTIES"]), uselist=task.use.enabled))
10266                         except portage.exception.InvalidDependString, e:
10267                                 show_invalid_depstring_notice(task,
10268                                         task.metadata["PROPERTIES"], str(e))
10269                                 raise self._unknown_internal_error()
10270                         if "interactive" in properties:
10271                                 interactive_tasks.append(task)
10272                 return interactive_tasks
10273
10274         def _set_digraph(self, digraph):
10275                 if "--nodeps" in self.myopts or \
10276                         (self._max_jobs is not True and self._max_jobs < 2):
10277                         # save some memory
10278                         self._digraph = None
10279                         return
10280
10281                 self._digraph = digraph
10282                 self._find_system_deps()
10283                 self._prune_digraph()
10284                 self._prevent_builddir_collisions()
10285
10286         def _find_system_deps(self):
10287                 """
10288                 Find system packages and their deep runtime dependencies. Before being
10289                 merged, these packages go to merge_wait_queue, to be merged when no
10290                 other packages are building.
10291                 """
10292                 deep_system_deps = self._deep_system_deps
10293                 deep_system_deps.clear()
10294                 deep_system_deps.update(
10295                         _find_deep_system_runtime_deps(self._digraph))
10296                 deep_system_deps.difference_update([pkg for pkg in \
10297                         deep_system_deps if pkg.operation != "merge"])
10298
10299         def _prune_digraph(self):
10300                 """
10301                 Prune root nodes that have become irrelevant (completed tasks, installed nomerge packages, and onlydeps nodes).
10302                 """
10303
10304                 graph = self._digraph
10305                 completed_tasks = self._completed_tasks
10306                 removed_nodes = set()
10307                 while True:
10308                         for node in graph.root_nodes():
10309                                 if not isinstance(node, Package) or \
10310                                         (node.installed and node.operation == "nomerge") or \
10311                                         node.onlydeps or \
10312                                         node in completed_tasks:
10313                                         removed_nodes.add(node)
10314                         if removed_nodes:
10315                                 graph.difference_update(removed_nodes)
10316                         if not removed_nodes:
10317                                 break
10318                         removed_nodes.clear()
10319
10320         def _prevent_builddir_collisions(self):
10321                 """
10322                 When building stages, sometimes the same exact cpv needs to be merged
10323                 to both $ROOTs. Add edges to the digraph in order to avoid collisions
10324                 in the builddir. Currently, normal file locks would be inappropriate
10325                 for this purpose since emerge holds all of its build dir locks from
10326                 the main process.
10327                 """
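                      # Illustrative (hypothetical) example: if sys-libs/zlib-1.2.3
                      # must be merged both for ROOT=/ and for ROOT=/mnt/stage, the
                      # buildtime edge added below forces the second merge to wait
                      # for the first, so the two never occupy the same build
                      # directory at the same time.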
10328                 cpv_map = {}
10329                 for pkg in self._mergelist:
10330                         if not isinstance(pkg, Package):
10331                                 # a satisfied blocker
10332                                 continue
10333                         if pkg.installed:
10334                                 continue
10335                         if pkg.cpv not in cpv_map:
10336                                 cpv_map[pkg.cpv] = [pkg]
10337                                 continue
10338                         for earlier_pkg in cpv_map[pkg.cpv]:
10339                                 self._digraph.add(earlier_pkg, pkg,
10340                                         priority=DepPriority(buildtime=True))
10341                         cpv_map[pkg.cpv].append(pkg)
10342
10343         class _pkg_failure(portage.exception.PortageException):
10344                 """
10345                 An instance of this class is raised by unmerge() when
10346                 an uninstallation fails.
10347                 """
10348                 status = 1
10349                 def __init__(self, *pargs):
10350                         portage.exception.PortageException.__init__(self, pargs)
10351                         if pargs:
10352                                 self.status = pargs[0]
10353
10354         def _schedule_fetch(self, fetcher):
10355                 """
10356                 Schedule a fetcher on the fetch queue, in order to
10357                 serialize access to the fetch log.
10358                 """
10359                 self._task_queues.fetch.addFront(fetcher)
10360
10361         def _schedule_setup(self, setup_phase):
10362                 """
10363                 Schedule a setup phase on the merge queue, in order to
10364                 serialize unsandboxed access to the live filesystem.
10365                 """
10366                 self._task_queues.merge.addFront(setup_phase)
10367                 self._schedule()
10368
10369         def _schedule_unpack(self, unpack_phase):
10370                 """
10371                 Schedule an unpack phase on the unpack queue, in order
10372                 to serialize $DISTDIR access for live ebuilds.
10373                 """
10374                 self._task_queues.unpack.add(unpack_phase)
10375
10376         def _find_blockers(self, new_pkg):
10377                 """
10378                 Returns a callable which should be called only when
10379                 the vdb lock has been acquired.
10380                 """
10381                 def get_blockers():
10382                         return self._find_blockers_with_lock(new_pkg, acquire_lock=0)
10383                 return get_blockers
10384
10385         def _find_blockers_with_lock(self, new_pkg, acquire_lock=0):
10386                 if self._opts_ignore_blockers.intersection(self.myopts):
10387                         return None
10388
10389                 # Call gc.collect() here to avoid heap overflow that
10390                 # triggers 'Cannot allocate memory' errors (reported
10391                 # with python-2.5).
10392                 import gc
10393                 gc.collect()
10394
10395                 blocker_db = self._blocker_db[new_pkg.root]
10396
10397                 blocker_dblinks = []
10398                 for blocking_pkg in blocker_db.findInstalledBlockers(
10399                         new_pkg, acquire_lock=acquire_lock):
10400                         if new_pkg.slot_atom == blocking_pkg.slot_atom:
10401                                 continue
10402                         if new_pkg.cpv == blocking_pkg.cpv:
10403                                 continue
10404                         blocker_dblinks.append(portage.dblink(
10405                                 blocking_pkg.category, blocking_pkg.pf, blocking_pkg.root,
10406                                 self.pkgsettings[blocking_pkg.root], treetype="vartree",
10407                                 vartree=self.trees[blocking_pkg.root]["vartree"]))
10408
10409                 gc.collect()
10410
10411                 return blocker_dblinks
10412
10413         def _dblink_pkg(self, pkg_dblink):
10414                 cpv = pkg_dblink.mycpv
10415                 type_name = RootConfig.tree_pkg_map[pkg_dblink.treetype]
10416                 root_config = self.trees[pkg_dblink.myroot]["root_config"]
10417                 installed = type_name == "installed"
10418                 return self._pkg(cpv, type_name, root_config, installed=installed)
10419
10420         def _append_to_log_path(self, log_path, msg):
10421                 f = open(log_path, 'a')
10422                 try:
10423                         f.write(msg)
10424                 finally:
10425                         f.close()
10426
10427         def _dblink_elog(self, pkg_dblink, phase, func, msgs):
10428
10429                 log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
10430                 log_file = None
10431                 out = sys.stdout
10432                 background = self._background
10433
10434                 if background and log_path is not None:
10435                         log_file = open(log_path, 'a')
10436                         out = log_file
10437
10438                 try:
10439                         for msg in msgs:
10440                                 func(msg, phase=phase, key=pkg_dblink.mycpv, out=out)
10441                 finally:
10442                         if log_file is not None:
10443                                 log_file.close()
10444
10445         def _dblink_emerge_log(self, msg):
10446                 self._logger.log(msg)
10447
10448         def _dblink_display_merge(self, pkg_dblink, msg, level=0, noiselevel=0):
10449                 log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
10450                 background = self._background
10451
10452                 if log_path is None:
10453                         if not (background and level < logging.WARN):
10454                                 portage.util.writemsg_level(msg,
10455                                         level=level, noiselevel=noiselevel)
10456                 else:
10457                         if not background:
10458                                 portage.util.writemsg_level(msg,
10459                                         level=level, noiselevel=noiselevel)
10460                         self._append_to_log_path(log_path, msg)
10461
10462         def _dblink_ebuild_phase(self,
10463                 pkg_dblink, pkg_dbapi, ebuild_path, phase):
10464                 """
10465                 Using this callback for merge phases allows the scheduler
10466                 to run while these phases execute asynchronously, and allows
10467                 the scheduler to control output handling.
10468                 """
10469
10470                 scheduler = self._sched_iface
10471                 settings = pkg_dblink.settings
10472                 pkg = self._dblink_pkg(pkg_dblink)
10473                 background = self._background
10474                 log_path = settings.get("PORTAGE_LOG_FILE")
10475
10476                 ebuild_phase = EbuildPhase(background=background,
10477                         pkg=pkg, phase=phase, scheduler=scheduler,
10478                         settings=settings, tree=pkg_dblink.treetype)
10479                 ebuild_phase.start()
10480                 ebuild_phase.wait()
10481
10482                 return ebuild_phase.returncode
10483
10484         def _generate_digests(self):
10485                 """
10486                 Generate digests if necessary for --digest or FEATURES=digest.
10487                 In order to avoid interference, this must be done before parallel
10488                 tasks are started.
10489                 """
10490
10491                 if '--fetchonly' in self.myopts:
10492                         return os.EX_OK
10493
10494                 digest = '--digest' in self.myopts
10495                 if not digest:
10496                         for pkgsettings in self.pkgsettings.itervalues():
10497                                 if 'digest' in pkgsettings.features:
10498                                         digest = True
10499                                         break
10500
10501                 if not digest:
10502                         return os.EX_OK
10503
10504                 for x in self._mergelist:
10505                         if not isinstance(x, Package) or \
10506                                 x.type_name != 'ebuild' or \
10507                                 x.operation != 'merge':
10508                                 continue
10509                         pkgsettings = self.pkgsettings[x.root]
10510                         if '--digest' not in self.myopts and \
10511                                 'digest' not in pkgsettings.features:
10512                                 continue
10513                         portdb = x.root_config.trees['porttree'].dbapi
10514                         ebuild_path = portdb.findname(x.cpv)
10515                         if not ebuild_path:
10516                                 writemsg_level(
10517                                         "!!! Could not locate ebuild for '%s'.\n" \
10518                                         % x.cpv, level=logging.ERROR, noiselevel=-1)
10519                                 return 1
10520                         pkgsettings['O'] = os.path.dirname(ebuild_path)
10521                         if not portage.digestgen([], pkgsettings, myportdb=portdb):
10522                                 writemsg_level(
10523                                         "!!! Unable to generate manifest for '%s'.\n" \
10524                                         % x.cpv, level=logging.ERROR, noiselevel=-1)
10525                                 return 1
10526
10527                 return os.EX_OK
10528
10529         def _check_manifests(self):
10530                 # Verify all the manifests now so that the user is notified of failure
10531                 # as soon as possible.
10532                 if "strict" not in self.settings.features or \
10533                         "--fetchonly" in self.myopts or \
10534                         "--fetch-all-uri" in self.myopts:
10535                         return os.EX_OK
10536
10537                 shown_verifying_msg = False
10538                 quiet_settings = {}
10539                 for myroot, pkgsettings in self.pkgsettings.iteritems():
10540                         quiet_config = portage.config(clone=pkgsettings)
10541                         quiet_config["PORTAGE_QUIET"] = "1"
10542                         quiet_config.backup_changes("PORTAGE_QUIET")
10543                         quiet_settings[myroot] = quiet_config
10544                         del quiet_config
10545
10546                 for x in self._mergelist:
10547                         if not isinstance(x, Package) or \
10548                                 x.type_name != "ebuild":
10549                                 continue
10550
10551                         if not shown_verifying_msg:
10552                                 shown_verifying_msg = True
10553                                 self._status_msg("Verifying ebuild manifests")
10554
10555                         root_config = x.root_config
10556                         portdb = root_config.trees["porttree"].dbapi
10557                         quiet_config = quiet_settings[root_config.root]
10558                         quiet_config["O"] = os.path.dirname(portdb.findname(x.cpv))
10559                         if not portage.digestcheck([], quiet_config, strict=True):
10560                                 return 1
10561
10562                 return os.EX_OK
10563
10564         def _add_prefetchers(self):
10565
10566                 if not self._parallel_fetch:
10567                         return
10568
10569                 if self._parallel_fetch:
10570                         self._status_msg("Starting parallel fetch")
10571
10572                         prefetchers = self._prefetchers
10573                         getbinpkg = "--getbinpkg" in self.myopts
10574
10575                         # In order to avoid "waiting for lock" messages
10576                         # at the beginning, which annoy users, never
10577                         # spawn a prefetcher for the first package.
10578                         for pkg in self._mergelist[1:]:
10579                                 prefetcher = self._create_prefetcher(pkg)
10580                                 if prefetcher is not None:
10581                                         self._task_queues.fetch.add(prefetcher)
10582                                         prefetchers[pkg] = prefetcher
10583
10584         def _create_prefetcher(self, pkg):
10585                 """
10586                 @return: a prefetcher, or None if not applicable
10587                 """
10588                 prefetcher = None
10589
10590                 if not isinstance(pkg, Package):
10591                         pass
10592
10593                 elif pkg.type_name == "ebuild":
10594
10595                         prefetcher = EbuildFetcher(background=True,
10596                                 config_pool=self._ConfigPool(pkg.root,
10597                                 self._allocate_config, self._deallocate_config),
10598                                 fetchonly=1, logfile=self._fetch_log,
10599                                 pkg=pkg, prefetch=True, scheduler=self._sched_iface)
10600
10601                 elif pkg.type_name == "binary" and \
10602                         "--getbinpkg" in self.myopts and \
10603                         pkg.root_config.trees["bintree"].isremote(pkg.cpv):
10604
10605                         prefetcher = BinpkgPrefetcher(background=True,
10606                                 pkg=pkg, scheduler=self._sched_iface)
10607
10608                 return prefetcher
10609
10610         def _is_restart_scheduled(self):
10611                 """
10612                 Check if the merge list contains a replacement
10613                 for the currently running instance that will result
10614                 in a restart after it is merged.
10615                 @rtype: bool
10616                 @returns: True if a restart is scheduled, False otherwise.
10617                 """
10618                 if self._opts_no_restart.intersection(self.myopts):
10619                         return False
10620
10621                 mergelist = self._mergelist
10622
10623                 for i, pkg in enumerate(mergelist):
10624                         if self._is_restart_necessary(pkg) and \
10625                                 i != len(mergelist) - 1:
10626                                 return True
10627
10628                 return False
10629
10630         def _is_restart_necessary(self, pkg):
10631                 """
10632                 @return: True if merging the given package
10633                         requires restart, False otherwise.
10634                 """
10635
10636                 # Figure out if we need a restart.
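                      # Example: a restart is needed when the package being merged is
                      # sys-apps/portage itself for the running ROOT and its version
                      # differs from the instance that is currently executing.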
10637                 if pkg.root == self._running_root.root and \
10638                         portage.match_from_list(
10639                         portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
10640                         if self._running_portage:
10641                                 return pkg.cpv != self._running_portage.cpv
10642                         return True
10643                 return False
10644
10645         def _restart_if_necessary(self, pkg):
10646                 """
10647                 Use execv() to restart emerge. This happens
10648                 if portage upgrades itself and there are
10649                 remaining packages in the list.
10650                 """
10651
10652                 if self._opts_no_restart.intersection(self.myopts):
10653                         return
10654
10655                 if not self._is_restart_necessary(pkg):
10656                         return
10657
10658                 if pkg == self._mergelist[-1]:
10659                         return
10660
10661                 self._main_loop_cleanup()
10662
10663                 logger = self._logger
10664                 pkg_count = self._pkg_count
10665                 mtimedb = self._mtimedb
10666                 bad_resume_opts = self._bad_resume_opts
10667
10668                 logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
10669                         (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
10670
10671                 logger.log(" *** RESTARTING " + \
10672                         "emerge via exec() after change of " + \
10673                         "portage version.")
10674
10675                 mtimedb["resume"]["mergelist"].remove(list(pkg))
10676                 mtimedb.commit()
10677                 portage.run_exitfuncs()
10678                 mynewargv = [sys.argv[0], "--resume"]
10679                 resume_opts = self.myopts.copy()
10680                 # For automatic resume, we need to prevent
10681                 # any of bad_resume_opts from leaking in
10682                 # via EMERGE_DEFAULT_OPTS.
10683                 resume_opts["--ignore-default-opts"] = True
10684                 for myopt, myarg in resume_opts.iteritems():
10685                         if myopt not in bad_resume_opts:
10686                                 if myarg is True:
10687                                         mynewargv.append(myopt)
10688                                 else:
10689                                         mynewargv.append(myopt +"="+ str(myarg))
10690                 # priority only needs to be adjusted on the first run
10691                 os.environ["PORTAGE_NICENESS"] = "0"
10692                 os.execv(mynewargv[0], mynewargv)
10693
10694         def merge(self):
10695
10696                 if "--resume" in self.myopts:
10697                         # We're resuming.
10698                         portage.writemsg_stdout(
10699                                 colorize("GOOD", "*** Resuming merge...\n"), noiselevel=-1)
10700                         self._logger.log(" *** Resuming merge...")
10701
10702                 self._save_resume_list()
10703
10704                 try:
10705                         self._background = self._background_mode()
10706                 except self._unknown_internal_error:
10707                         return 1
10708
10709                 for root in self.trees:
10710                         root_config = self.trees[root]["root_config"]
10711
10712                         # Even for --pretend --fetch mode, PORTAGE_TMPDIR is required
10713                         # since it might spawn pkg_nofetch which requires PORTAGE_BUILDDIR
10714                         # for ensuring sane $PWD (bug #239560) and storing elog messages.
10715                         tmpdir = root_config.settings.get("PORTAGE_TMPDIR", "")
10716                         if not tmpdir or not os.path.isdir(tmpdir):
10717                                 msg = "The directory specified in your " + \
10718                                         "PORTAGE_TMPDIR variable, '%s', " % tmpdir + \
10719                                         "does not exist. Please create this " + \
10720                                         "directory or correct your PORTAGE_TMPDIR setting."
10721                                 msg = textwrap.wrap(msg, 70)
10722                                 out = portage.output.EOutput()
10723                                 for l in msg:
10724                                         out.eerror(l)
10725                                 return 1
10726
10727                         if self._background:
10728                                 root_config.settings.unlock()
10729                                 root_config.settings["PORTAGE_BACKGROUND"] = "1"
10730                                 root_config.settings.backup_changes("PORTAGE_BACKGROUND")
10731                                 root_config.settings.lock()
10732
10733                         self.pkgsettings[root] = portage.config(
10734                                 clone=root_config.settings)
10735
10736                 rval = self._generate_digests()
10737                 if rval != os.EX_OK:
10738                         return rval
10739
10740                 rval = self._check_manifests()
10741                 if rval != os.EX_OK:
10742                         return rval
10743
10744                 keep_going = "--keep-going" in self.myopts
10745                 fetchonly = self._build_opts.fetchonly
10746                 mtimedb = self._mtimedb
10747                 failed_pkgs = self._failed_pkgs
10748
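                      # With --keep-going, each pass through this loop drops the failed
                      # packages (and, via _calc_resume_list(), anything whose deps can
                      # no longer be satisfied) from the resume list and retries the
                      # remaining merges.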
10749                 while True:
10750                         rval = self._merge()
10751                         if rval == os.EX_OK or fetchonly or not keep_going:
10752                                 break
10753                         if "resume" not in mtimedb:
10754                                 break
10755                         mergelist = self._mtimedb["resume"].get("mergelist")
10756                         if not mergelist:
10757                                 break
10758
10759                         if not failed_pkgs:
10760                                 break
10761
10762                         for failed_pkg in failed_pkgs:
10763                                 mergelist.remove(list(failed_pkg.pkg))
10764
10765                         self._failed_pkgs_all.extend(failed_pkgs)
10766                         del failed_pkgs[:]
10767
10768                         if not mergelist:
10769                                 break
10770
10771                         if not self._calc_resume_list():
10772                                 break
10773
10774                         clear_caches(self.trees)
10775                         if not self._mergelist:
10776                                 break
10777
10778                         self._save_resume_list()
10779                         self._pkg_count.curval = 0
10780                         self._pkg_count.maxval = len([x for x in self._mergelist \
10781                                 if isinstance(x, Package) and x.operation == "merge"])
10782                         self._status_display.maxval = self._pkg_count.maxval
10783
10784                 self._logger.log(" *** Finished. Cleaning up...")
10785
10786                 if failed_pkgs:
10787                         self._failed_pkgs_all.extend(failed_pkgs)
10788                         del failed_pkgs[:]
10789
10790                 background = self._background
10791                 failure_log_shown = False
10792                 if background and len(self._failed_pkgs_all) == 1:
10793                         # If only one package failed then just show its
10794                         # whole log for easy viewing.
10795                         failed_pkg = self._failed_pkgs_all[-1]
10796                         build_dir = failed_pkg.build_dir
10797                         log_file = None
10798
10799                         log_paths = [failed_pkg.build_log]
10800
10801                         log_path = self._locate_failure_log(failed_pkg)
10802                         if log_path is not None:
10803                                 try:
10804                                         log_file = open(log_path)
10805                                 except IOError:
10806                                         pass
10807
10808                         if log_file is not None:
10809                                 try:
10810                                         for line in log_file:
10811                                                 writemsg_level(line, noiselevel=-1)
10812                                 finally:
10813                                         log_file.close()
10814                                 failure_log_shown = True
10815
10816                 # Dump mod_echo output now since it tends to flood the terminal.
10817                 # This prevents more important output, generated
10818                 # later, from being swept away by the mod_echo output.
10819                 mod_echo_output = _flush_elog_mod_echo()
10820
10821                 if background and not failure_log_shown and \
10822                         self._failed_pkgs_all and \
10823                         self._failed_pkgs_die_msgs and \
10824                         not mod_echo_output:
10825
10826                         printer = portage.output.EOutput()
10827                         for mysettings, key, logentries in self._failed_pkgs_die_msgs:
10828                                 root_msg = ""
10829                                 if mysettings["ROOT"] != "/":
10830                                         root_msg = " merged to %s" % mysettings["ROOT"]
10831                                 print
10832                                 printer.einfo("Error messages for package %s%s:" % \
10833                                         (colorize("INFORM", key), root_msg))
10834                                 print
10835                                 for phase in portage.const.EBUILD_PHASES:
10836                                         if phase not in logentries:
10837                                                 continue
10838                                         for msgtype, msgcontent in logentries[phase]:
10839                                                 if isinstance(msgcontent, basestring):
10840                                                         msgcontent = [msgcontent]
10841                                                 for line in msgcontent:
10842                                                         printer.eerror(line.strip("\n"))
10843
10844                 if self._post_mod_echo_msgs:
10845                         for msg in self._post_mod_echo_msgs:
10846                                 msg()
10847
10848                 if len(self._failed_pkgs_all) > 1 or \
10849                         (self._failed_pkgs_all and "--keep-going" in self.myopts):
10850                         if len(self._failed_pkgs_all) > 1:
10851                                 msg = "The following %d packages have " % \
10852                                         len(self._failed_pkgs_all) + \
10853                                         "failed to build or install:"
10854                         else:
10855                                 msg = "The following package has " + \
10856                                         "failed to build or install:"
10857                         prefix = bad(" * ")
10858                         writemsg(prefix + "\n", noiselevel=-1)
10859                         from textwrap import wrap
10860                         for line in wrap(msg, 72):
10861                                 writemsg("%s%s\n" % (prefix, line), noiselevel=-1)
10862                         writemsg(prefix + "\n", noiselevel=-1)
10863                         for failed_pkg in self._failed_pkgs_all:
10864                                 writemsg("%s\t%s\n" % (prefix,
10865                                         colorize("INFORM", str(failed_pkg.pkg))),
10866                                         noiselevel=-1)
10867                         writemsg(prefix + "\n", noiselevel=-1)
10868
10869                 return rval
10870
10871         def _elog_listener(self, mysettings, key, logentries, fulltext):
10872                 errors = portage.elog.filter_loglevels(logentries, ["ERROR"])
10873                 if errors:
10874                         self._failed_pkgs_die_msgs.append(
10875                                 (mysettings, key, errors))
10876
10877         def _locate_failure_log(self, failed_pkg):
10878
10879                 build_dir = failed_pkg.build_dir
10880                 log_file = None
10881
10882                 log_paths = [failed_pkg.build_log]
10883
10884                 for log_path in log_paths:
10885                         if not log_path:
10886                                 continue
10887
10888                         try:
10889                                 log_size = os.stat(log_path).st_size
10890                         except OSError:
10891                                 continue
10892
10893                         if log_size == 0:
10894                                 continue
10895
10896                         return log_path
10897
10898                 return None
10899
10900         def _add_packages(self):
10901                 pkg_queue = self._pkg_queue
10902                 for pkg in self._mergelist:
10903                         if isinstance(pkg, Package):
10904                                 pkg_queue.append(pkg)
10905                         elif isinstance(pkg, Blocker):
10906                                 pass
10907
10908         def _system_merge_started(self, merge):
10909                 """
10910                 Add any unsatisfied runtime deps to self._unsatisfied_system_deps.
10911                 """
10912                 graph = self._digraph
10913                 if graph is None:
10914                         return
10915                 pkg = merge.merge.pkg
10916
10917                 # Skip this if $ROOT != / since it shouldn't matter if there
10918                 # are unsatisfied system runtime deps in this case.
10919                 if pkg.root != '/':
10920                         return
10921
10922                 completed_tasks = self._completed_tasks
10923                 unsatisfied = self._unsatisfied_system_deps
10924
10925                 def ignore_non_runtime_or_satisfied(priority):
10926                         """
10927                         Ignore non-runtime and satisfied runtime priorities.
10928                         """
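                              # Returning True tells digraph.child_nodes() to ignore the
                              # edge, so the traversal below only follows unsatisfied
                              # runtime / runtime_post dependencies.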
10929                         if isinstance(priority, DepPriority) and \
10930                                 not priority.satisfied and \
10931                                 (priority.runtime or priority.runtime_post):
10932                                 return False
10933                         return True
10934
10935                 # When checking for unsatisfied runtime deps, only check
10936                 # direct deps since indirect deps are checked when the
10937                 # corresponding parent is merged.
10938                 for child in graph.child_nodes(pkg,
10939                         ignore_priority=ignore_non_runtime_or_satisfied):
10940                         if not isinstance(child, Package) or \
10941                                 child.operation == 'uninstall':
10942                                 continue
10943                         if child is pkg:
10944                                 continue
10945                         if child.operation == 'merge' and \
10946                                 child not in completed_tasks:
10947                                 unsatisfied.add(child)
10948
10949         def _merge_wait_exit_handler(self, task):
10950                 self._merge_wait_scheduled.remove(task)
10951                 self._merge_exit(task)
10952
10953         def _merge_exit(self, merge):
10954                 self._do_merge_exit(merge)
10955                 self._deallocate_config(merge.merge.settings)
10956                 if merge.returncode == os.EX_OK and \
10957                         not merge.merge.pkg.installed:
10958                         self._status_display.curval += 1
10959                 self._status_display.merges = len(self._task_queues.merge)
10960                 self._schedule()
10961
10962         def _do_merge_exit(self, merge):
10963                 pkg = merge.merge.pkg
10964                 if merge.returncode != os.EX_OK:
10965                         settings = merge.merge.settings
10966                         build_dir = settings.get("PORTAGE_BUILDDIR")
10967                         build_log = settings.get("PORTAGE_LOG_FILE")
10968
10969                         self._failed_pkgs.append(self._failed_pkg(
10970                                 build_dir=build_dir, build_log=build_log,
10971                                 pkg=pkg,
10972                                 returncode=merge.returncode))
10973                         self._failed_pkg_msg(self._failed_pkgs[-1], "install", "to")
10974
10975                         self._status_display.failed = len(self._failed_pkgs)
10976                         return
10977
10978                 self._task_complete(pkg)
10979                 pkg_to_replace = merge.merge.pkg_to_replace
10980                 if pkg_to_replace is not None:
10981                         # When a package is replaced, mark its uninstall
10982                         # task complete (if any).
10983                         uninst_hash_key = \
10984                                 ("installed", pkg.root, pkg_to_replace.cpv, "uninstall")
10985                         self._task_complete(uninst_hash_key)
10986
10987                 if pkg.installed:
10988                         return
10989
10990                 self._restart_if_necessary(pkg)
10991
10992                 # Call mtimedb.commit() after each merge so that
10993                 # --resume still works after being interrupted
10994                 # by reboot, sigkill or similar.
10995                 mtimedb = self._mtimedb
10996                 mtimedb["resume"]["mergelist"].remove(list(pkg))
10997                 if not mtimedb["resume"]["mergelist"]:
10998                         del mtimedb["resume"]
10999                 mtimedb.commit()
11000
11001         def _build_exit(self, build):
11002                 if build.returncode == os.EX_OK:
11003                         self.curval += 1
11004                         merge = PackageMerge(merge=build)
11005                         if not build.build_opts.buildpkgonly and \
11006                                 build.pkg in self._deep_system_deps:
11007                                 # Since dependencies on system packages are frequently
11008                                 # unspecified, merge them only when no builds are executing.
11009                                 self._merge_wait_queue.append(merge)
11010                                 merge.addStartListener(self._system_merge_started)
11011                         else:
11012                                 merge.addExitListener(self._merge_exit)
11013                                 self._task_queues.merge.add(merge)
11014                                 self._status_display.merges = len(self._task_queues.merge)
11015                 else:
11016                         settings = build.settings
11017                         build_dir = settings.get("PORTAGE_BUILDDIR")
11018                         build_log = settings.get("PORTAGE_LOG_FILE")
11019
11020                         self._failed_pkgs.append(self._failed_pkg(
11021                                 build_dir=build_dir, build_log=build_log,
11022                                 pkg=build.pkg,
11023                                 returncode=build.returncode))
11024                         self._failed_pkg_msg(self._failed_pkgs[-1], "emerge", "for")
11025
11026                         self._status_display.failed = len(self._failed_pkgs)
11027                         self._deallocate_config(build.settings)
11028                 self._jobs -= 1
11029                 self._status_display.running = self._jobs
11030                 self._schedule()
11031
11032         def _extract_exit(self, build):
11033                 self._build_exit(build)
11034
11035         def _task_complete(self, pkg):
11036                 self._completed_tasks.add(pkg)
11037                 self._unsatisfied_system_deps.discard(pkg)
11038                 self._choose_pkg_return_early = False
11039
11040         def _merge(self):
11041
11042                 self._add_prefetchers()
11043                 self._add_packages()
11044                 pkg_queue = self._pkg_queue
11045                 failed_pkgs = self._failed_pkgs
11046                 portage.locks._quiet = self._background
11047                 portage.elog._emerge_elog_listener = self._elog_listener
11048                 rval = os.EX_OK
11049
11050                 try:
11051                         self._main_loop()
11052                 finally:
11053                         self._main_loop_cleanup()
11054                         portage.locks._quiet = False
11055                         portage.elog._emerge_elog_listener = None
11056                         if failed_pkgs:
11057                                 rval = failed_pkgs[-1].returncode
11058
11059                 return rval
11060
11061         def _main_loop_cleanup(self):
11062                 del self._pkg_queue[:]
11063                 self._completed_tasks.clear()
11064                 self._deep_system_deps.clear()
11065                 self._unsatisfied_system_deps.clear()
11066                 self._choose_pkg_return_early = False
11067                 self._status_display.reset()
11068                 self._digraph = None
11069                 self._task_queues.fetch.clear()
11070
11071         def _choose_pkg(self):
11072                 """
11073                 Choose a task that has all its dependencies satisfied.
11074                 """
11075
11076                 if self._choose_pkg_return_early:
11077                         return None
11078
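                      # Without a digraph (--nodeps, or at most one job) packages are
                      # taken strictly in list order; only the --nodeps with parallel
                      # jobs case may start a new package while others are still
                      # running.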
11079                 if self._digraph is None:
11080                         if (self._jobs or self._task_queues.merge) and \
11081                                 not ("--nodeps" in self.myopts and \
11082                                 (self._max_jobs is True or self._max_jobs > 1)):
11083                                 self._choose_pkg_return_early = True
11084                                 return None
11085                         return self._pkg_queue.pop(0)
11086
11087                 if not (self._jobs or self._task_queues.merge):
11088                         return self._pkg_queue.pop(0)
11089
11090                 self._prune_digraph()
11091
11092                 chosen_pkg = None
11093                 later = set(self._pkg_queue)
11094                 for pkg in self._pkg_queue:
11095                         later.remove(pkg)
11096                         if not self._dependent_on_scheduled_merges(pkg, later):
11097                                 chosen_pkg = pkg
11098                                 break
11099
11100                 if chosen_pkg is not None:
11101                         self._pkg_queue.remove(chosen_pkg)
11102
11103                 if chosen_pkg is None:
11104                         # There's no point in searching for a package to
11105                         # choose until at least one of the existing jobs
11106                         # completes.
11107                         self._choose_pkg_return_early = True
11108
11109                 return chosen_pkg
11110
11111         def _dependent_on_scheduled_merges(self, pkg, later):
11112                 """
11113                 Traverse the subgraph of the given package's deep dependencies
11114                 to see if it contains any scheduled merges.
11115                 @param pkg: a package to check dependencies for
11116                 @type pkg: Package
11117                 @param later: packages for which dependence should be ignored
11118                         since they will be merged later than pkg anyway and therefore
11119                         delaying the merge of pkg will not result in a more optimal
11120                         merge order
11121                 @type later: set
11122                 @rtype: bool
11123                 @returns: True if the package is dependent, False otherwise.
11124                 """
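                      # Depth-first walk over pkg's dependency subgraph: report pkg as
                      # dependent as soon as a child is found that still has a merge
                      # scheduled (not installed/nomerge, not an uninstall outside the
                      # direct deps, not already completed, and not ordered later
                      # anyway).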
11125
11126                 graph = self._digraph
11127                 completed_tasks = self._completed_tasks
11128
11129                 dependent = False
11130                 traversed_nodes = set([pkg])
11131                 direct_deps = graph.child_nodes(pkg)
11132                 node_stack = direct_deps
11133                 direct_deps = frozenset(direct_deps)
11134                 while node_stack:
11135                         node = node_stack.pop()
11136                         if node in traversed_nodes:
11137                                 continue
11138                         traversed_nodes.add(node)
11139                         if not ((node.installed and node.operation == "nomerge") or \
11140                                 (node.operation == "uninstall" and \
11141                                 node not in direct_deps) or \
11142                                 node in completed_tasks or \
11143                                 node in later):
11144                                 dependent = True
11145                                 break
11146                         node_stack.extend(graph.child_nodes(node))
11147
11148                 return dependent
11149
11150         def _allocate_config(self, root):
11151                 """
11152                 Allocate a unique config instance for a task in order
11153                 to prevent interference between parallel tasks.
11154                 """
11155                 if self._config_pool[root]:
11156                         temp_settings = self._config_pool[root].pop()
11157                 else:
11158                         temp_settings = portage.config(clone=self.pkgsettings[root])
11159                 # Since config.setcpv() isn't guaranteed to call config.reset() due to
11160                 # performance reasons, call it here to make sure all settings from the
11161                 # previous package get flushed out (such as PORTAGE_LOG_FILE).
11162                 temp_settings.reload()
11163                 temp_settings.reset()
11164                 return temp_settings
11165
11166         def _deallocate_config(self, settings):
11167                 self._config_pool[settings["ROOT"]].append(settings)
11168
11169         def _main_loop(self):
11170
11171                 # Only allow 1 job max if a restart is scheduled
11172                 # due to portage update.
11173                 if self._is_restart_scheduled() or \
11174                         self._opts_no_background.intersection(self.myopts):
11175                         self._set_max_jobs(1)
11176
11177                 merge_queue = self._task_queues.merge
11178
11179                 while self._schedule():
11180                         if self._poll_event_handlers:
11181                                 self._poll_loop()
11182
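                      # The package queue is exhausted (or a failure stopped
                      # scheduling); keep calling _schedule() and polling until the
                      # running jobs and queued merges have drained.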
11183                 while True:
11184                         self._schedule()
11185                         if not (self._jobs or merge_queue):
11186                                 break
11187                         if self._poll_event_handlers:
11188                                 self._poll_loop()
11189
11190         def _keep_scheduling(self):
11191                 return bool(self._pkg_queue and \
11192                         not (self._failed_pkgs and not self._build_opts.fetchonly))
11193
11194         def _schedule_tasks(self):
11195
11196                 # When the number of jobs drops to zero, process all waiting merges.
11197                 if not self._jobs and self._merge_wait_queue:
11198                         for task in self._merge_wait_queue:
11199                                 task.addExitListener(self._merge_wait_exit_handler)
11200                                 self._task_queues.merge.add(task)
11201                         self._status_display.merges = len(self._task_queues.merge)
11202                         self._merge_wait_scheduled.extend(self._merge_wait_queue)
11203                         del self._merge_wait_queue[:]
11204
11205                 self._schedule_tasks_imp()
11206                 self._status_display.display()
11207
11208                 state_change = 0
11209                 for q in self._task_queues.values():
11210                         if q.schedule():
11211                                 state_change += 1
11212
11213                 # Cancel prefetchers if they're the only reason
11214                 # the main poll loop is still running.
11215                 if self._failed_pkgs and not self._build_opts.fetchonly and \
11216                         not (self._jobs or self._task_queues.merge) and \
11217                         self._task_queues.fetch:
11218                         self._task_queues.fetch.clear()
11219                         state_change += 1
11220
11221                 if state_change:
11222                         self._schedule_tasks_imp()
11223                         self._status_display.display()
11224
11225                 return self._keep_scheduling()
11226
11227         def _job_delay(self):
11228                 """
11229                 @rtype: bool
11230                 @returns: True if job scheduling should be delayed, False otherwise.
11231                 """
11232
11233                 if self._jobs and self._max_load is not None:
11234
11235                         current_time = time.time()
11236
11237                         delay = self._job_delay_factor * self._jobs ** self._job_delay_exp
11238                         if delay > self._job_delay_max:
11239                                 delay = self._job_delay_max
11240                         if (current_time - self._previous_job_start_time) < delay:
11241                                 return True
11242
11243                 return False
11244
11245         def _schedule_tasks_imp(self):
11246                 """
11247                 @rtype: bool
11248                 @returns: True if state changed, False otherwise.
11249                 """
11250
11251                 state_change = 0
11252
11253                 while True:
11254
11255                         if not self._keep_scheduling():
11256                                 return bool(state_change)
11257
11258                         if self._choose_pkg_return_early or \
11259                                 self._merge_wait_scheduled or \
11260                                 (self._jobs and self._unsatisfied_system_deps) or \
11261                                 not self._can_add_job() or \
11262                                 self._job_delay():
11263                                 return bool(state_change)
11264
11265                         pkg = self._choose_pkg()
11266                         if pkg is None:
11267                                 return bool(state_change)
11268
11269                         state_change += 1
11270
11271                         if not pkg.installed:
11272                                 self._pkg_count.curval += 1
11273
11274                         task = self._task(pkg)
11275
11276                         if pkg.installed:
11277                                 merge = PackageMerge(merge=task)
11278                                 merge.addExitListener(self._merge_exit)
11279                                 self._task_queues.merge.add(merge)
11280
11281                         elif pkg.built:
11282                                 self._jobs += 1
11283                                 self._previous_job_start_time = time.time()
11284                                 self._status_display.running = self._jobs
11285                                 task.addExitListener(self._extract_exit)
11286                                 self._task_queues.jobs.add(task)
11287
11288                         else:
11289                                 self._jobs += 1
11290                                 self._previous_job_start_time = time.time()
11291                                 self._status_display.running = self._jobs
11292                                 task.addExitListener(self._build_exit)
11293                                 self._task_queues.jobs.add(task)
11294
11295                 return bool(state_change)
11296
11297         def _task(self, pkg):
11298
11299                 pkg_to_replace = None
11300                 if pkg.operation != "uninstall":
11301                         vardb = pkg.root_config.trees["vartree"].dbapi
11302                         previous_cpv = vardb.match(pkg.slot_atom)
11303                         if previous_cpv:
11304                                 previous_cpv = previous_cpv.pop()
11305                                 pkg_to_replace = self._pkg(previous_cpv,
11306                                         "installed", pkg.root_config, installed=True)
11307
11308                 task = MergeListItem(args_set=self._args_set,
11309                         background=self._background, binpkg_opts=self._binpkg_opts,
11310                         build_opts=self._build_opts,
11311                         config_pool=self._ConfigPool(pkg.root,
11312                         self._allocate_config, self._deallocate_config),
11313                         emerge_opts=self.myopts,
11314                         find_blockers=self._find_blockers(pkg), logger=self._logger,
11315                         mtimedb=self._mtimedb, pkg=pkg, pkg_count=self._pkg_count.copy(),
11316                         pkg_to_replace=pkg_to_replace,
11317                         prefetcher=self._prefetchers.get(pkg),
11318                         scheduler=self._sched_iface,
11319                         settings=self._allocate_config(pkg.root),
11320                         statusMessage=self._status_msg,
11321                         world_atom=self._world_atom)
11322
11323                 return task
11324
11325         def _failed_pkg_msg(self, failed_pkg, action, preposition):
11326                 pkg = failed_pkg.pkg
11327                 msg = "%s to %s %s" % \
11328                         (bad("Failed"), action, colorize("INFORM", pkg.cpv))
11329                 if pkg.root != "/":
11330                         msg += " %s %s" % (preposition, pkg.root)
11331
11332                 log_path = self._locate_failure_log(failed_pkg)
11333                 if log_path is not None:
11334                         msg += ", Log file:"
11335                 self._status_msg(msg)
11336
11337                 if log_path is not None:
11338                         self._status_msg(" '%s'" % (colorize("INFORM", log_path),))
11339
11340         def _status_msg(self, msg):
11341                 """
11342                 Display a brief status message (no newlines) in the status display.
11343                 This is called by tasks to provide feedback to the user. It
11344                 delegates the responsibility of generating \r and \n control
11345                 characters to the status display, so that lines are created or
11346                 erased when necessary and appropriate.
11347
11348                 @type msg: str
11349                 @param msg: a brief status message (no newlines allowed)
11350                 """
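                      # Illustrative call (the message text here is hypothetical):
                      #     self._status_msg(">>> Emerging (1 of 3) sys-apps/portage")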
11351                 if not self._background:
11352                         writemsg_level("\n")
11353                 self._status_display.displayMessage(msg)
11354
11355         def _save_resume_list(self):
11356                 """
11357                 Do this before verifying the ebuild Manifests, since it might
11358                 be possible for the user to use --resume --skipfirst to get past
11359                 a non-essential package with a broken digest.
11360                 """
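                      # Each saved entry is list(pkg); the assumed shape, based on the
                      # package hash key, is roughly ["ebuild", "/", "<cpv>", "merge"].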
11361                 mtimedb = self._mtimedb
11362                 mtimedb["resume"]["mergelist"] = [list(x) \
11363                         for x in self._mergelist \
11364                         if isinstance(x, Package) and x.operation == "merge"]
11365
11366                 mtimedb.commit()
11367
11368         def _calc_resume_list(self):
11369                 """
11370                 Use the current resume list to calculate a new one,
11371                 dropping any packages with unsatisfied deps.
11372                 @rtype: bool
11373                 @returns: True if successful, False otherwise.
11374                 """
11375                 print colorize("GOOD", "*** Resuming merge...")
11376
11377                 if self._show_list():
11378                         if "--tree" in self.myopts:
11379                                 portage.writemsg_stdout("\n" + \
11380                                         darkgreen("These are the packages that " + \
11381                                         "would be merged, in reverse order:\n\n"))
11382
11383                         else:
11384                                 portage.writemsg_stdout("\n" + \
11385                                         darkgreen("These are the packages that " + \
11386                                         "would be merged, in order:\n\n"))
11387
11388                 show_spinner = "--quiet" not in self.myopts and \
11389                         "--nodeps" not in self.myopts
11390
11391                 if show_spinner:
11392                         print "Calculating dependencies  ",
11393
11394                 myparams = create_depgraph_params(self.myopts, None)
11395                 success = False
11396                 e = None
11397                 try:
11398                         success, mydepgraph, dropped_tasks = resume_depgraph(
11399                                 self.settings, self.trees, self._mtimedb, self.myopts,
11400                                 myparams, self._spinner)
11401                 except depgraph.UnsatisfiedResumeDep, exc:
11402                         # rename variable to avoid python-3.0 error:
11403                         # SyntaxError: can not delete variable 'e' referenced in nested
11404                         #              scope
11405                         e = exc
11406                         mydepgraph = e.depgraph
11407                         dropped_tasks = set()
11408
11409                 if show_spinner:
11410                         print "\b\b... done!"
11411
11412                 if e is not None:
11413                         def unsatisfied_resume_dep_msg():
11414                                 mydepgraph.display_problems()
11415                                 out = portage.output.EOutput()
11416                                 out.eerror("One or more packages are either masked or " + \
11417                                         "have missing dependencies:")
11418                                 out.eerror("")
11419                                 indent = "  "
11420                                 show_parents = set()
11421                                 for dep in e.value:
11422                                         if dep.parent in show_parents:
11423                                                 continue
11424                                         show_parents.add(dep.parent)
11425                                         if dep.atom is None:
11426                                                 out.eerror(indent + "Masked package:")
11427                                                 out.eerror(2 * indent + str(dep.parent))
11428                                                 out.eerror("")
11429                                         else:
11430                                                 out.eerror(indent + str(dep.atom) + " pulled in by:")
11431                                                 out.eerror(2 * indent + str(dep.parent))
11432                                                 out.eerror("")
11433                                 msg = "The resume list contains packages " + \
11434                                         "that are either masked or have " + \
11435                                         "unsatisfied dependencies. " + \
11436                                         "Please restart/continue " + \
11437                                         "the operation manually, or use --skipfirst " + \
11438                                         "to skip the first package in the list and " + \
11439                                         "any other packages that may be " + \
11440                                         "masked or have missing dependencies."
11441                                 for line in textwrap.wrap(msg, 72):
11442                                         out.eerror(line)
11443                         self._post_mod_echo_msgs.append(unsatisfied_resume_dep_msg)
11444                         return False
11445
11446                 if success and self._show_list():
11447                         mylist = mydepgraph.altlist()
11448                         if mylist:
11449                                 if "--tree" in self.myopts:
11450                                         mylist.reverse()
11451                                 mydepgraph.display(mylist, favorites=self._favorites)
11452
11453                 if not success:
11454                         self._post_mod_echo_msgs.append(mydepgraph.display_problems)
11455                         return False
11456                 mydepgraph.display_problems()
11457
11458                 mylist = mydepgraph.altlist()
11459                 mydepgraph.break_refs(mylist)
11460                 mydepgraph.break_refs(dropped_tasks)
11461                 self._mergelist = mylist
11462                 self._set_digraph(mydepgraph.schedulerGraph())
11463
11464                 msg_width = 75
11465                 for task in dropped_tasks:
11466                         if not (isinstance(task, Package) and task.operation == "merge"):
11467                                 continue
11468                         pkg = task
11469                         msg = "emerge --keep-going:" + \
11470                                 " %s" % (pkg.cpv,)
11471                         if pkg.root != "/":
11472                                 msg += " for %s" % (pkg.root,)
11473                         msg += " dropped due to unsatisfied dependency."
11474                         for line in textwrap.wrap(msg, msg_width):
11475                                 eerror(line, phase="other", key=pkg.cpv)
11476                         settings = self.pkgsettings[pkg.root]
11477                         # Ensure that log collection from $T is disabled inside
11478                         # elog_process(), since any logs that might exist are
11479                         # not valid here.
11480                         settings.pop("T", None)
11481                         portage.elog.elog_process(pkg.cpv, settings)
11482                         self._failed_pkgs_all.append(self._failed_pkg(pkg=pkg))
11483
11484                 return True
11485
11486         def _show_list(self):
11487                 myopts = self.myopts
11488                 if "--quiet" not in myopts and \
11489                         ("--ask" in myopts or "--tree" in myopts or \
11490                         "--verbose" in myopts):
11491                         return True
11492                 return False
11493
11494         def _world_atom(self, pkg):
11495                 """
11496                 Add the package to the world file, but only if
11497                 it's supposed to be added. Otherwise, do nothing.
11498                 """
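                      # Skipped below when options such as --pretend or --oneshot suppress
                      # world updates, when the package is not on the target root, or when
                      # it was not selected by one of the command-line arguments.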
11499
11500                 if set(("--buildpkgonly", "--fetchonly",
11501                         "--fetch-all-uri",
11502                         "--oneshot", "--onlydeps",
11503                         "--pretend")).intersection(self.myopts):
11504                         return
11505
11506                 if pkg.root != self.target_root:
11507                         return
11508
11509                 args_set = self._args_set
11510                 if not args_set.findAtomForPackage(pkg):
11511                         return
11512
11513                 logger = self._logger
11514                 pkg_count = self._pkg_count
11515                 root_config = pkg.root_config
11516                 world_set = root_config.sets["world"]
11517                 world_locked = False
11518                 if hasattr(world_set, "lock"):
11519                         world_set.lock()
11520                         world_locked = True
11521
11522                 try:
11523                         if hasattr(world_set, "load"):
11524                                 world_set.load() # maybe it's changed on disk
11525
11526                         atom = create_world_atom(pkg, args_set, root_config)
11527                         if atom:
11528                                 if hasattr(world_set, "add"):
11529                                         self._status_msg(('Recording %s in "world" ' + \
11530                                                 'favorites file...') % atom)
11531                                         logger.log(" === (%s of %s) Updating world file (%s)" % \
11532                                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv))
11533                                         world_set.add(atom)
11534                                 else:
11535                                         writemsg_level('\n!!! Unable to record %s in "world"\n' % \
11536                                                 (atom,), level=logging.WARN, noiselevel=-1)
11537                 finally:
11538                         if world_locked:
11539                                 world_set.unlock()
11540
11541         def _pkg(self, cpv, type_name, root_config, installed=False):
11542                 """
11543                 Get a package instance from the cache, or create a new
11544                 one if necessary. Raises KeyError from aux_get if it
11545                 fails for some reason (package does not exist or is
11546                 corrupt).
11547                 """
11548                 operation = "merge"
11549                 if installed:
11550                         operation = "nomerge"
11551
11552                 if self._digraph is not None:
11553                         # Reuse existing instance when available.
11554                         pkg = self._digraph.get(
11555                                 (type_name, root_config.root, cpv, operation))
11556                         if pkg is not None:
11557                                 return pkg
11558
11559                 tree_type = depgraph.pkg_tree_map[type_name]
11560                 db = root_config.trees[tree_type].dbapi
11561                 db_keys = list(self.trees[root_config.root][
11562                         tree_type].dbapi._aux_cache_keys)
11563                 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
11564                 pkg = Package(cpv=cpv, metadata=metadata,
11565                         root_config=root_config, installed=installed)
11566                 if type_name == "ebuild":
11567                         settings = self.pkgsettings[root_config.root]
11568                         settings.setcpv(pkg)
11569                         pkg.metadata["USE"] = settings["PORTAGE_USE"]
11570                         pkg.metadata['CHOST'] = settings.get('CHOST', '')
11571
11572                 return pkg
11573
11574 class MetadataRegen(PollScheduler):
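              # Scheduler that regenerates the ebuild metadata cache (auxdb) by running
              # EbuildMetadataPhase jobs in parallel; used, for example, by the
              # `emerge --regen` code path.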
11575
11576         def __init__(self, portdb, cp_iter=None, consumer=None,
11577                 max_jobs=None, max_load=None):
11578                 PollScheduler.__init__(self)
11579                 self._portdb = portdb
11580                 self._global_cleanse = False
11581                 if cp_iter is None:
11582                         cp_iter = self._iter_every_cp()
11583                         # We can globally cleanse stale cache only if we
11584                         # iterate over every single cp.
11585                         self._global_cleanse = True
11586                 self._cp_iter = cp_iter
11587                 self._consumer = consumer
11588
11589                 if max_jobs is None:
11590                         max_jobs = 1
11591
11592                 self._max_jobs = max_jobs
11593                 self._max_load = max_load
11594                 self._sched_iface = self._sched_iface_class(
11595                         register=self._register,
11596                         schedule=self._schedule_wait,
11597                         unregister=self._unregister)
11598
11599                 self._valid_pkgs = set()
11600                 self._cp_set = set()
11601                 self._process_iter = self._iter_metadata_processes()
11602                 self.returncode = os.EX_OK
11603                 self._error_count = 0
11604
11605         def _iter_every_cp(self):
11606                 every_cp = self._portdb.cp_all()
11607                 every_cp.sort(reverse=True)
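                      # Popping from the end of the reverse-sorted list yields cp values in
                      # ascending order while letting the list shrink as it is consumed.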
11608                 try:
11609                         while True:
11610                                 yield every_cp.pop()
11611                 except IndexError:
11612                         pass
11613
11614         def _iter_metadata_processes(self):
11615                 portdb = self._portdb
11616                 valid_pkgs = self._valid_pkgs
11617                 cp_set = self._cp_set
11618                 consumer = self._consumer
11619
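                      # For each requested cp, reuse any still-valid cache entry (reporting
                      # it directly to the consumer) and yield an EbuildMetadataPhase job
                      # only for ebuilds whose cache is missing or stale.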
11620                 for cp in self._cp_iter:
11621                         cp_set.add(cp)
11622                         portage.writemsg_stdout("Processing %s\n" % cp)
11623                         cpv_list = portdb.cp_list(cp)
11624                         for cpv in cpv_list:
11625                                 valid_pkgs.add(cpv)
11626                                 ebuild_path, repo_path = portdb.findname2(cpv)
11627                                 metadata, st, emtime = portdb._pull_valid_cache(
11628                                         cpv, ebuild_path, repo_path)
11629                                 if metadata is not None:
11630                                         if consumer is not None:
11631                                                 consumer(cpv, ebuild_path,
11632                                                         repo_path, metadata)
11633                                         continue
11634
11635                                 yield EbuildMetadataPhase(cpv=cpv, ebuild_path=ebuild_path,
11636                                         ebuild_mtime=emtime,
11637                                         metadata_callback=portdb._metadata_callback,
11638                                         portdb=portdb, repo_path=repo_path,
11639                                         settings=portdb.doebuild_settings)
11640
11641         def run(self):
11642
11643                 portdb = self._portdb
11644                 from portage.cache.cache_errors import CacheError
11645                 dead_nodes = {}
11646
11647                 while self._schedule():
11648                         self._poll_loop()
11649
11650                 while self._jobs:
11651                         self._poll_loop()
11652
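                      # After regeneration, collect potentially stale auxdb keys per tree
                      # (dead_nodes): every key when doing a global cleanse, otherwise only
                      # keys belonging to the processed cp set. Keys that still have a
                      # matching ebuild are discarded below before the rest are deleted.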
11653                 if self._global_cleanse:
11654                         for mytree in portdb.porttrees:
11655                                 try:
11656                                         dead_nodes[mytree] = set(portdb.auxdb[mytree].iterkeys())
11657                                 except CacheError, e:
11658                                         portage.writemsg("Error listing cache entries for " + \
11659                                                 "'%s': %s, continuing...\n" % (mytree, e),
11660                                                 noiselevel=-1)
11661                                         del e
11662                                         dead_nodes = None
11663                                         break
11664                 else:
11665                         cp_set = self._cp_set
11666                         cpv_getkey = portage.cpv_getkey
11667                         for mytree in portdb.porttrees:
11668                                 try:
11669                                         dead_nodes[mytree] = set(cpv for cpv in \
11670                                                 portdb.auxdb[mytree].iterkeys() \
11671                                                 if cpv_getkey(cpv) in cp_set)
11672                                 except CacheError, e:
11673                                         portage.writemsg("Error listing cache entries for " + \
11674                                                 "'%s': %s, continuing...\n" % (mytree, e),
11675                                                 noiselevel=-1)
11676                                         del e
11677                                         dead_nodes = None
11678                                         break
11679
11680                 if dead_nodes:
11681                         for y in self._valid_pkgs:
11682                                 for mytree in portdb.porttrees:
11683                                         if portdb.findname2(y, mytree=mytree)[0]:
11684                                                 dead_nodes[mytree].discard(y)
11685
11686                         for mytree, nodes in dead_nodes.iteritems():
11687                                 auxdb = portdb.auxdb[mytree]
11688                                 for y in nodes:
11689                                         try:
11690                                                 del auxdb[y]
11691                                         except (KeyError, CacheError):
11692                                                 pass
11693
11694         def _schedule_tasks(self):
11695                 """
11696                 @rtype: bool
11697                 @returns: True if there may be remaining tasks to schedule,
11698                         False otherwise.
11699                 """
11700                 while self._can_add_job():
11701                         try:
11702                                 metadata_process = self._process_iter.next()
11703                         except StopIteration:
11704                                 return False
11705
11706                         self._jobs += 1
11707                         metadata_process.scheduler = self._sched_iface
11708                         metadata_process.addExitListener(self._metadata_exit)
11709                         metadata_process.start()
11710                 return True
11711
11712         def _metadata_exit(self, metadata_process):
11713                 self._jobs -= 1
11714                 if metadata_process.returncode != os.EX_OK:
11715                         self.returncode = 1
11716                         self._error_count += 1
11717                         self._valid_pkgs.discard(metadata_process.cpv)
11718                         portage.writemsg("Error processing %s, continuing...\n" % \
11719                                 (metadata_process.cpv,), noiselevel=-1)
11720
11721                 if self._consumer is not None:
11722                         # On failure, still notify the consumer (in this case the metadata
11723                         # argument is None).
11724                         self._consumer(metadata_process.cpv,
11725                                 metadata_process.ebuild_path,
11726                                 metadata_process.repo_path,
11727                                 metadata_process.metadata)
11728
11729                 self._schedule()
11730
11731 class UninstallFailure(portage.exception.PortageException):
11732         """
11733         An instance of this class is raised by unmerge() when
11734         an uninstallation fails.
11735         """
11736         status = 1
11737         def __init__(self, *pargs):
11738                 portage.exception.PortageException.__init__(self, pargs)
11739                 if pargs:
11740                         self.status = pargs[0]
11741
11742 def unmerge(root_config, myopts, unmerge_action,
11743         unmerge_files, ldpath_mtimes, autoclean=0,
11744         clean_world=1, clean_delay=1, ordered=0, raise_on_error=0,
11745         scheduler=None, writemsg_level=portage.util.writemsg_level):
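              # Resolve the given atoms/paths against the vartree and unmerge the selected
              # packages, honoring --pretend/--ask, warning about system-profile packages,
              # skipping portage itself and packages still referenced by package sets, and
              # optionally cleaning "world" entries. Returns 1 on success and 0 if nothing
              # was selected; a failed unmerge raises UninstallFailure when raise_on_error
              # is set and exits otherwise.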
11746
11747         quiet = "--quiet" in myopts
11748         settings = root_config.settings
11749         sets = root_config.sets
11750         vartree = root_config.trees["vartree"]
11751         candidate_catpkgs=[]
11752         global_unmerge=0
11753         xterm_titles = "notitles" not in settings.features
11754         out = portage.output.EOutput()
11755         pkg_cache = {}
11756         db_keys = list(vartree.dbapi._aux_cache_keys)
11757
11758         def _pkg(cpv):
11759                 pkg = pkg_cache.get(cpv)
11760                 if pkg is None:
11761                         pkg = Package(cpv=cpv, installed=True,
11762                                 metadata=izip(db_keys, vartree.dbapi.aux_get(cpv, db_keys)),
11763                                 root_config=root_config,
11764                                 type_name="installed")
11765                         pkg_cache[cpv] = pkg
11766                 return pkg
11767
11768         vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
11769         try:
11770                 # At least the parent needs to exist for the lock file.
11771                 portage.util.ensure_dirs(vdb_path)
11772         except portage.exception.PortageException:
11773                 pass
11774         vdb_lock = None
11775         try:
11776                 if os.access(vdb_path, os.W_OK):
11777                         vdb_lock = portage.locks.lockdir(vdb_path)
11778                 realsyslist = sets["system"].getAtoms()
11779                 syslist = []
11780                 for x in realsyslist:
11781                         mycp = portage.dep_getkey(x)
11782                         if mycp in settings.getvirtuals():
11783                                 providers = []
11784                                 for provider in settings.getvirtuals()[mycp]:
11785                                         if vartree.dbapi.match(provider):
11786                                                 providers.append(provider)
11787                                 if len(providers) == 1:
11788                                         syslist.extend(providers)
11789                         else:
11790                                 syslist.append(mycp)
11791         
11792                 mysettings = portage.config(clone=settings)
11793         
11794                 if not unmerge_files:
11795                         if unmerge_action == "unmerge":
11796                                 print
11797                                 print bold("emerge unmerge") + " can only be used with specific package names"
11798                                 print
11799                                 return 0
11800                         else:
11801                                 global_unmerge = 1
11802         
11803                 localtree = vartree
11804                 # process all arguments and add all
11805                 # valid db entries to candidate_catpkgs
11806                 if global_unmerge:
11807                         if not unmerge_files:
11808                                 candidate_catpkgs.extend(vartree.dbapi.cp_all())
11809                 else:
11810                         #we've got command-line arguments
11811                         if not unmerge_files:
11812                                 print "\nNo packages to unmerge have been provided.\n"
11813                                 return 0
11814                         for x in unmerge_files:
11815                                 arg_parts = x.split('/')
11816                                 if x[0] not in [".","/"] and \
11817                                         arg_parts[-1][-7:] != ".ebuild":
11818                                         #possible cat/pkg or dep; treat as such
11819                                         candidate_catpkgs.append(x)
11820                                 elif unmerge_action in ["prune","clean"]:
11821                                         print "\n!!! Prune and clean do not accept individual" + \
11822                                                 " ebuilds as arguments;\n    skipping.\n"
11823                                         continue
11824                                 else:
11825                                         # it appears that the user is specifying an installed
11826                                         # ebuild and we're in "unmerge" mode, so it's ok.
11827                                         if not os.path.exists(x):
11828                                                 print "\n!!! The path '"+x+"' doesn't exist.\n"
11829                                                 return 0
11830         
11831                                         absx   = os.path.abspath(x)
11832                                         sp_absx = absx.split("/")
11833                                         if sp_absx[-1][-7:] == ".ebuild":
11834                                                 del sp_absx[-1]
11835                                                 absx = "/".join(sp_absx)
11836         
11837                                         sp_absx_len = len(sp_absx)
11838         
11839                                         vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
11840                                         vdb_len  = len(vdb_path)
11841         
11842                                         sp_vdb     = vdb_path.split("/")
11843                                         sp_vdb_len = len(sp_vdb)
11844         
11845                                         if not os.path.exists(absx+"/CONTENTS"):
11846                                                 print "!!! Not a valid db dir: "+str(absx)
11847                                                 return 0
11848         
11849                                         if sp_absx_len <= sp_vdb_len:
11850                                                 # The path is shorter, so it can't be inside the vdb.
11851                                                 print sp_absx
11852                                                 print absx
11853                                                 print "\n!!!",x,"cannot be inside "+ \
11854                                                         vdb_path+"; aborting.\n"
11855                                                 return 0
11856         
11857                                         for idx in range(0,sp_vdb_len):
11858                                                 if idx >= sp_absx_len or sp_vdb[idx] != sp_absx[idx]:
11859                                                         print sp_absx
11860                                                         print absx
11861                                                         print "\n!!!", x, "is not inside "+\
11862                                                                 vdb_path+"; aborting.\n"
11863                                                         return 0
11864         
11865                                         print "="+"/".join(sp_absx[sp_vdb_len:])
11866                                         candidate_catpkgs.append(
11867                                                 "="+"/".join(sp_absx[sp_vdb_len:]))
11868         
11869                 newline=""
11870                 if (not "--quiet" in myopts):
11871                         newline="\n"
11872                 if settings["ROOT"] != "/":
11873                         writemsg_level(darkgreen(newline+ \
11874                                 ">>> Using system located in ROOT tree %s\n" % \
11875                                 settings["ROOT"]))
11876
11877                 if (("--pretend" in myopts) or ("--ask" in myopts)) and \
11878                         not ("--quiet" in myopts):
11879                         writemsg_level(darkgreen(newline+\
11880                                 ">>> These are the packages that would be unmerged:\n"))
11881
11882                 # Preservation of order is required for --depclean and --prune so
11883                 # that dependencies are respected. Use all_selected to eliminate
11884                 # duplicate packages since the same package may be selected by
11885                 # multiple atoms.
11886                 pkgmap = []
11887                 all_selected = set()
11888                 for x in candidate_catpkgs:
11889                         # cycle through all our candidate deps and determine
11890                         # what will and will not get unmerged
11891                         try:
11892                                 mymatch = vartree.dbapi.match(x)
11893                         except portage.exception.AmbiguousPackageName, errpkgs:
11894                                 print "\n\n!!! The short ebuild name \"" + \
11895                                         x + "\" is ambiguous.  Please specify"
11896                                 print "!!! one of the following fully-qualified " + \
11897                                         "ebuild names instead:\n"
11898                                 for i in errpkgs[0]:
11899                                         print "    " + green(i)
11900                                 print
11901                                 sys.exit(1)
11902         
11903                         if not mymatch and x[0] not in "<>=~":
11904                                 mymatch = localtree.dep_match(x)
11905                         if not mymatch:
11906                                 portage.writemsg("\n--- Couldn't find '%s' to %s.\n" % \
11907                                         (x, unmerge_action), noiselevel=-1)
11908                                 continue
11909
11910                         pkgmap.append(
11911                                 {"protected": set(), "selected": set(), "omitted": set()})
11912                         mykey = len(pkgmap) - 1
11913                         if unmerge_action == "unmerge":
11914                                 for y in mymatch:
11915                                         if y not in all_selected:
11916                                                 pkgmap[mykey]["selected"].add(y)
11917                                                 all_selected.add(y)
11918                         elif unmerge_action == "prune":
11919                                 if len(mymatch) == 1:
11920                                         continue
11921                                 best_version = mymatch[0]
11922                                 best_slot = vartree.getslot(best_version)
11923                                 best_counter = vartree.dbapi.cpv_counter(best_version)
11924                                 for mypkg in mymatch[1:]:
11925                                         myslot = vartree.getslot(mypkg)
11926                                         mycounter = vartree.dbapi.cpv_counter(mypkg)
11927                                         if (myslot == best_slot and mycounter > best_counter) or \
11928                                                 mypkg == portage.best([mypkg, best_version]):
11929                                                 if myslot == best_slot:
11930                                                         if mycounter < best_counter:
11931                                                                 # On slot collision, keep the one with the
11932                                                                 # highest counter since it is the most
11933                                                                 # recently installed.
11934                                                                 continue
11935                                                 best_version = mypkg
11936                                                 best_slot = myslot
11937                                                 best_counter = mycounter
11938                                 pkgmap[mykey]["protected"].add(best_version)
11939                                 pkgmap[mykey]["selected"].update(mypkg for mypkg in mymatch \
11940                                         if mypkg != best_version and mypkg not in all_selected)
11941                                 all_selected.update(pkgmap[mykey]["selected"])
11942                         else:
11943                                 # unmerge_action == "clean"
11944                                 slotmap={}
11945                                 for mypkg in mymatch:
11946                                         if unmerge_action == "clean":
11947                                                 myslot = localtree.getslot(mypkg)
11948                                         else:
11949                                                 # since we're pruning, we don't care about slots
11950                                                 # and put all the pkgs in together
11951                                                 myslot = 0
11952                                         if myslot not in slotmap:
11953                                                 slotmap[myslot] = {}
11954                                         slotmap[myslot][localtree.dbapi.cpv_counter(mypkg)] = mypkg
11955
11956                                 for mypkg in vartree.dbapi.cp_list(
11957                                         portage.dep_getkey(mymatch[0])):
11958                                         myslot = vartree.getslot(mypkg)
11959                                         if myslot not in slotmap:
11960                                                 slotmap[myslot] = {}
11961                                         slotmap[myslot][vartree.dbapi.cpv_counter(mypkg)] = mypkg
11962
11963                                 for myslot in slotmap:
11964                                         counterkeys = slotmap[myslot].keys()
11965                                         if not counterkeys:
11966                                                 continue
11967                                         counterkeys.sort()
11968                                         pkgmap[mykey]["protected"].add(
11969                                                 slotmap[myslot][counterkeys[-1]])
11970                                         del counterkeys[-1]
11971
11972                                         for counter in counterkeys[:]:
11973                                                 mypkg = slotmap[myslot][counter]
11974                                                 if mypkg not in mymatch:
11975                                                         counterkeys.remove(counter)
11976                                                         pkgmap[mykey]["protected"].add(
11977                                                                 slotmap[myslot][counter])
11978
11979                                         #be pretty and get them in order of merge:
11980                                         for ckey in counterkeys:
11981                                                 mypkg = slotmap[myslot][ckey]
11982                                                 if mypkg not in all_selected:
11983                                                         pkgmap[mykey]["selected"].add(mypkg)
11984                                                         all_selected.add(mypkg)
11985                                         # ok, now the last-merged package
11986                                         # is protected, and the rest are selected
11987                 numselected = len(all_selected)
11988                 if global_unmerge and not numselected:
11989                         portage.writemsg_stdout("\n>>> No outdated packages were found on your system.\n")
11990                         return 0
11991         
11992                 if not numselected:
11993                         portage.writemsg_stdout(
11994                                 "\n>>> No packages selected for removal by " + \
11995                                 unmerge_action + "\n")
11996                         return 0
11997         finally:
11998                 if vdb_lock:
11999                         vartree.dbapi.flush_cache()
12000                         portage.locks.unlockdir(vdb_lock)
12001         
12002         from portage.sets.base import EditablePackageSet
12003         
12004         # generate a list of package sets that are directly or indirectly listed in "world",
12005         # as there is no persistent list of "installed" sets
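              # Roughly a fixed-point expansion: keep appending nested @set references
              # found in the collected sets until a pass adds nothing new.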
12006         installed_sets = ["world"]
12007         stop = False
12008         pos = 0
12009         while not stop:
12010                 stop = True
12011                 pos = len(installed_sets)
12012                 for s in installed_sets[pos - 1:]:
12013                         if s not in sets:
12014                                 continue
12015                         candidates = [x[len(SETPREFIX):] for x in sets[s].getNonAtoms() if x.startswith(SETPREFIX)]
12016                         if candidates:
12017                                 stop = False
12018                                 installed_sets += candidates
12019         installed_sets = [x for x in installed_sets if x not in root_config.setconfig.active]
12020         del stop, pos
12021
12022         # We don't want to unmerge packages that are still referenced by user-editable
12023         # package sets listed in "world", since they would be remerged on the next
12024         # update of "world" or of the relevant package sets.
12025         unknown_sets = set()
12026         for cp in xrange(len(pkgmap)):
12027                 for cpv in pkgmap[cp]["selected"].copy():
12028                         try:
12029                                 pkg = _pkg(cpv)
12030                         except KeyError:
12031                                 # It could have been uninstalled
12032                                 # by a concurrent process.
12033                                 continue
12034
12035                         if unmerge_action != "clean" and \
12036                                 root_config.root == "/" and \
12037                                 portage.match_from_list(
12038                                 portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
12039                                 msg = ("Not unmerging package %s since there is no valid "
12040                                         "reason for portage to unmerge itself.") % (pkg.cpv,)
12041                                 for line in textwrap.wrap(msg, 75):
12042                                         out.eerror(line)
12043                                 # adjust pkgmap so the display output is correct
12044                                 pkgmap[cp]["selected"].remove(cpv)
12045                                 all_selected.remove(cpv)
12046                                 pkgmap[cp]["protected"].add(cpv)
12047                                 continue
12048
12049                         parents = []
12050                         for s in installed_sets:
12051                                 # skip sets that the user requested to unmerge, and skip world 
12052                                 # unless we're unmerging a package set (as the package would be 
12053                                 # removed from "world" later on)
12054                                 if s in root_config.setconfig.active or (s == "world" and not root_config.setconfig.active):
12055                                         continue
12056
12057                                 if s not in sets:
12058                                         if s in unknown_sets:
12059                                                 continue
12060                                         unknown_sets.add(s)
12061                                         out = portage.output.EOutput()
12062                                         out.eerror(("Unknown set '@%s' in " + \
12063                                                 "%svar/lib/portage/world_sets") % \
12064                                                 (s, root_config.root))
12065                                         continue
12066
12067                                 # only check instances of EditablePackageSet as other classes are generally used for
12068                                 # special purposes and can be ignored here (and are usually generated dynamically, so the
12069                                 # user can't do much about them anyway)
12070                                 if isinstance(sets[s], EditablePackageSet):
12071
12072                                         # This is derived from a snippet of code in the
12073                                         # depgraph._iter_atoms_for_pkg() method.
12074                                         for atom in sets[s].iterAtomsForPackage(pkg):
12075                                                 inst_matches = vartree.dbapi.match(atom)
12076                                                 inst_matches.reverse() # descending order
12077                                                 higher_slot = None
12078                                                 for inst_cpv in inst_matches:
12079                                                         try:
12080                                                                 inst_pkg = _pkg(inst_cpv)
12081                                                         except KeyError:
12082                                                                 # It could have been uninstalled
12083                                                                 # by a concurrent process.
12084                                                                 continue
12085
12086                                                         if inst_pkg.cp != atom.cp:
12087                                                                 continue
12088                                                         if pkg >= inst_pkg:
12089                                                                 # This is descending order, and we're not
12090                                                                 # interested in any versions <= the given pkg.
12091                                                                 break
12092                                                         if pkg.slot_atom != inst_pkg.slot_atom:
12093                                                                 higher_slot = inst_pkg
12094                                                                 break
12095                                                 if higher_slot is None:
12096                                                         parents.append(s)
12097                                                         break
12098                         if parents:
12099                                 #print colorize("WARN", "Package %s is going to be unmerged," % cpv)
12100                                 #print colorize("WARN", "but still listed in the following package sets:")
12101                                 #print "    %s\n" % ", ".join(parents)
12102                                 print colorize("WARN", "Not unmerging package %s as it is" % cpv)
12103                                 print colorize("WARN", "still referenced by the following package sets:")
12104                                 print "    %s\n" % ", ".join(parents)
12105                                 # adjust pkgmap so the display output is correct
12106                                 pkgmap[cp]["selected"].remove(cpv)
12107                                 all_selected.remove(cpv)
12108                                 pkgmap[cp]["protected"].add(cpv)
12109         
12110         del installed_sets
12111
12112         numselected = len(all_selected)
12113         if not numselected:
12114                 writemsg_level(
12115                         "\n>>> No packages selected for removal by " + \
12116                         unmerge_action + "\n")
12117                 return 0
12118
12119         # Unmerge order only matters in some cases
12120         if not ordered:
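                      # Group the selections by cp so that the preview below can be printed
                      # in sorted category/package order when merge order does not matter.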
12121                 unordered = {}
12122                 for d in pkgmap:
12123                         selected = d["selected"]
12124                         if not selected:
12125                                 continue
12126                         cp = portage.cpv_getkey(iter(selected).next())
12127                         cp_dict = unordered.get(cp)
12128                         if cp_dict is None:
12129                                 cp_dict = {}
12130                                 unordered[cp] = cp_dict
12131                                 for k in d:
12132                                         cp_dict[k] = set()
12133                         for k, v in d.iteritems():
12134                                 cp_dict[k].update(v)
12135                 pkgmap = [unordered[cp] for cp in sorted(unordered)]
12136
12137         for x in xrange(len(pkgmap)):
12138                 selected = pkgmap[x]["selected"]
12139                 if not selected:
12140                         continue
12141                 for mytype, mylist in pkgmap[x].iteritems():
12142                         if mytype == "selected":
12143                                 continue
12144                         mylist.difference_update(all_selected)
12145                 cp = portage.cpv_getkey(iter(selected).next())
12146                 for y in localtree.dep_match(cp):
12147                         if y not in pkgmap[x]["omitted"] and \
12148                                 y not in pkgmap[x]["selected"] and \
12149                                 y not in pkgmap[x]["protected"] and \
12150                                 y not in all_selected:
12151                                 pkgmap[x]["omitted"].add(y)
12152                 if global_unmerge and not pkgmap[x]["selected"]:
12153                         #avoid cluttering the preview printout with stuff that isn't getting unmerged
12154                         continue
12155                 if not (pkgmap[x]["protected"] or pkgmap[x]["omitted"]) and cp in syslist:
12156                         writemsg_level(colorize("BAD","\a\n\n!!! " + \
12157                                 "'%s' is part of your system profile.\n" % cp),
12158                                 level=logging.WARNING, noiselevel=-1)
12159                         writemsg_level(colorize("WARN","\a!!! Unmerging it may " + \
12160                                 "be damaging to your system.\n\n"),
12161                                 level=logging.WARNING, noiselevel=-1)
12162                         if clean_delay and "--pretend" not in myopts and "--ask" not in myopts:
12163                                 countdown(int(settings["EMERGE_WARNING_DELAY"]),
12164                                         colorize("UNMERGE_WARN", "Press Ctrl-C to Stop"))
12165                 if not quiet:
12166                         writemsg_level("\n %s\n" % (bold(cp),), noiselevel=-1)
12167                 else:
12168                         writemsg_level(bold(cp) + ": ", noiselevel=-1)
12169                 for mytype in ["selected","protected","omitted"]:
12170                         if not quiet:
12171                                 writemsg_level((mytype + ": ").rjust(14), noiselevel=-1)
12172                         if pkgmap[x][mytype]:
12173                                 sorted_pkgs = [portage.catpkgsplit(mypkg)[1:] for mypkg in pkgmap[x][mytype]]
12174                                 sorted_pkgs.sort(key=cmp_sort_key(portage.pkgcmp))
12175                                 for pn, ver, rev in sorted_pkgs:
12176                                         if rev == "r0":
12177                                                 myversion = ver
12178                                         else:
12179                                                 myversion = ver + "-" + rev
12180                                         if mytype == "selected":
12181                                                 writemsg_level(
12182                                                         colorize("UNMERGE_WARN", myversion + " "),
12183                                                         noiselevel=-1)
12184                                         else:
12185                                                 writemsg_level(
12186                                                         colorize("GOOD", myversion + " "), noiselevel=-1)
12187                         else:
12188                                 writemsg_level("none ", noiselevel=-1)
12189                         if not quiet:
12190                                 writemsg_level("\n", noiselevel=-1)
12191                 if quiet:
12192                         writemsg_level("\n", noiselevel=-1)
12193
12194         writemsg_level("\n>>> " + colorize("UNMERGE_WARN", "'Selected'") + \
12195                 " packages are slated for removal.\n")
12196         writemsg_level(">>> " + colorize("GOOD", "'Protected'") + \
12197                         " and " + colorize("GOOD", "'omitted'") + \
12198                         " packages will not be removed.\n\n")
12199
12200         if "--pretend" in myopts:
12201                 #we're done... return
12202                 return 0
12203         if "--ask" in myopts:
12204                 if userquery("Would you like to unmerge these packages?")=="No":
12205                         # enter pretend mode for correct formatting of results
12206                         myopts["--pretend"] = True
12207                         print
12208                         print "Quitting."
12209                         print
12210                         return 0
12211         #the real unmerging begins, after a short delay....
12212         if clean_delay and not autoclean:
12213                 countdown(int(settings["CLEAN_DELAY"]), ">>> Unmerging")
12214
12215         for x in xrange(len(pkgmap)):
12216                 for y in pkgmap[x]["selected"]:
12217                         writemsg_level(">>> Unmerging "+y+"...\n", noiselevel=-1)
12218                         emergelog(xterm_titles, "=== Unmerging... ("+y+")")
12219                         mysplit = y.split("/")
12220                         #unmerge...
12221                         retval = portage.unmerge(mysplit[0], mysplit[1], settings["ROOT"],
12222                                 mysettings, unmerge_action not in ["clean","prune"],
12223                                 vartree=vartree, ldpath_mtimes=ldpath_mtimes,
12224                                 scheduler=scheduler)
12225
12226                         if retval != os.EX_OK:
12227                                 emergelog(xterm_titles, " !!! unmerge FAILURE: "+y)
12228                                 if raise_on_error:
12229                                         raise UninstallFailure(retval)
12230                                 sys.exit(retval)
12231                         else:
12232                                 if clean_world and hasattr(sets["world"], "cleanPackage"):
12233                                         sets["world"].cleanPackage(vartree.dbapi, y)
12234                                 emergelog(xterm_titles, " >>> unmerge success: "+y)
12235         if clean_world and hasattr(sets["world"], "remove"):
12236                 for s in root_config.setconfig.active:
12237                         sets["world"].remove(SETPREFIX+s)
12238         return 1
12239
12240 def chk_updated_info_files(root, infodirs, prev_mtimes, retval):
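              # Regenerate the GNU info directory index ("dir" files) for any info
              # directories whose mtime changed since the previous run, using
              # /usr/bin/install-info when it is available.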
12241
12242         if os.path.exists("/usr/bin/install-info"):
12243                 out = portage.output.EOutput()
12244                 regen_infodirs=[]
12245                 for z in infodirs:
12246                         if z=='':
12247                                 continue
12248                         inforoot=normpath(root+z)
12249                         if os.path.isdir(inforoot):
12250                                 infomtime = long(os.stat(inforoot).st_mtime)
12251                                 if inforoot not in prev_mtimes or \
12252                                         prev_mtimes[inforoot] != infomtime:
12253                                                 regen_infodirs.append(inforoot)
12254
12255                 if not regen_infodirs:
12256                         portage.writemsg_stdout("\n")
12257                         out.einfo("GNU info directory index is up-to-date.")
12258                 else:
12259                         portage.writemsg_stdout("\n")
12260                         out.einfo("Regenerating GNU info directory index...")
12261
12262                         dir_extensions = ("", ".gz", ".bz2")
12263                         icount=0
12264                         badcount=0
12265                         errmsg = ""
12266                         for inforoot in regen_infodirs:
12267                                 if inforoot=='':
12268                                         continue
12269
12270                                 if not os.path.isdir(inforoot) or \
12271                                         not os.access(inforoot, os.W_OK):
12272                                         continue
12273
12274                                 file_list = os.listdir(inforoot)
12275                                 file_list.sort()
12276                                 dir_file = os.path.join(inforoot, "dir")
12277                                 moved_old_dir = False
12278                                 processed_count = 0
12279                                 for x in file_list:
12280                                         if x.startswith(".") or \
12281                                                 os.path.isdir(os.path.join(inforoot, x)):
12282                                                 continue
12283                                         if x.startswith("dir"):
12284                                                 skip = False
12285                                                 for ext in dir_extensions:
12286                                                         if x == "dir" + ext or \
12287                                                                 x == "dir" + ext + ".old":
12288                                                                 skip = True
12289                                                                 break
12290                                                 if skip:
12291                                                         continue
12292                                         if processed_count == 0:
12293                                                 for ext in dir_extensions:
12294                                                         try:
12295                                                                 os.rename(dir_file + ext, dir_file + ext + ".old")
12296                                                                 moved_old_dir = True
12297                                                         except EnvironmentError, e:
12298                                                                 if e.errno != errno.ENOENT:
12299                                                                         raise
12300                                                                 del e
12301                                         processed_count += 1
12302                                         myso=commands.getstatusoutput("LANG=C LANGUAGE=C /usr/bin/install-info --dir-file="+inforoot+"/dir "+inforoot+"/"+x)[1]
12303                                         existsstr="already exists, for file `"
12304                                         if myso!="":
12305                                                 if re.search(existsstr,myso):
12306                                                         # Already exists... Don't increment the count for this.
12307                                                         pass
12308                                                 elif myso[:44]=="install-info: warning: no info dir entry in ":
12309                                                         # This info file doesn't contain a DIR-header: install-info produces this
12310                                                         # (harmless) warning (the --quiet switch doesn't seem to work).
12311                                                         # Don't increment the count for this.
12312                                                         pass
12313                                                 else:
12314                                                         badcount=badcount+1
12315                                                         errmsg += myso + "\n"
12316                                         icount=icount+1
12317
12318                                 if moved_old_dir and not os.path.exists(dir_file):
12319                                         # We didn't generate a new dir file, so put the old file
12320                                         # back where it was originally found.
12321                                         for ext in dir_extensions:
12322                                                 try:
12323                                                         os.rename(dir_file + ext + ".old", dir_file + ext)
12324                                                 except EnvironmentError, e:
12325                                                         if e.errno != errno.ENOENT:
12326                                                                 raise
12327                                                         del e
12328
12329                                 # Clean up dir.old cruft so that it doesn't prevent
12330                                 # unmerge of otherwise empty directories.
12331                                 for ext in dir_extensions:
12332                                         try:
12333                                                 os.unlink(dir_file + ext + ".old")
12334                                         except EnvironmentError, e:
12335                                                 if e.errno != errno.ENOENT:
12336                                                         raise
12337                                                 del e
12338
12339                                 # Update the mtime so we can potentially avoid regenerating.
12340                                 prev_mtimes[inforoot] = long(os.stat(inforoot).st_mtime)
12341
12342                         if badcount:
12343                                 out.eerror("Processed %d info files; %d errors." % \
12344                                         (icount, badcount))
12345                                 writemsg_level(errmsg, level=logging.ERROR, noiselevel=-1)
12346                         else:
12347                                 if icount > 0:
12348                                         out.einfo("Processed %d info files." % (icount,))
12349
12350
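      # Print a reminder for every repository that has unread news items; the
      # unread-item state is only updated when --pretend is not in effect.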
12351 def display_news_notification(root_config, myopts):
12352         target_root = root_config.root
12353         trees = root_config.trees
12354         settings = trees["vartree"].settings
12355         portdb = trees["porttree"].dbapi
12356         vardb = trees["vartree"].dbapi
12357         NEWS_PATH = os.path.join("metadata", "news")
12358         UNREAD_PATH = os.path.join(target_root, NEWS_LIB_PATH, "news")
12359         newsReaderDisplay = False
12360         update = "--pretend" not in myopts
12361
12362         for repo in portdb.getRepositories():
12363                 unreadItems = checkUpdatedNewsItems(
12364                         portdb, vardb, NEWS_PATH, UNREAD_PATH, repo, update=update)
12365                 if unreadItems:
12366                         if not newsReaderDisplay:
12367                                 newsReaderDisplay = True
12368                                 print
12369                         print colorize("WARN", " * IMPORTANT:"),
12370                         print "%s news items need reading for repository '%s'." % (unreadItems, repo)
12371                         
12372         
12373         if newsReaderDisplay:
12374                 print colorize("WARN", " *"),
12375                 print "Use " + colorize("GOOD", "eselect news") + " to read news items."
12376                 print
12377
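      # List the preserved libraries recorded in the vardb registry, along with
      # up to MAX_DISPLAY consumers of each library and the packages that own
      # those consumers.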
12378 def display_preserved_libs(vardbapi):
12379         MAX_DISPLAY = 3
12380
12381         # Ensure the registry is consistent with existing files.
12382         vardbapi.plib_registry.pruneNonExisting()
12383
12384         if vardbapi.plib_registry.hasEntries():
12385                 print
12386                 print colorize("WARN", "!!!") + " existing preserved libs:"
12387                 plibdata = vardbapi.plib_registry.getPreservedLibs()
12388                 linkmap = vardbapi.linkmap
12389                 consumer_map = {}
12390                 owners = {}
12391                 linkmap_broken = False
12392
12393                 try:
12394                         linkmap.rebuild()
12395                 except portage.exception.CommandNotFound, e:
12396                         writemsg_level("!!! Command Not Found: %s\n" % (e,),
12397                                 level=logging.ERROR, noiselevel=-1)
12398                         del e
12399                         linkmap_broken = True
12400                 else:
12401                         search_for_owners = set()
12402                         for cpv in plibdata:
12403                                 internal_plib_keys = set(linkmap._obj_key(f) \
12404                                         for f in plibdata[cpv])
12405                                 for f in plibdata[cpv]:
12406                                         if f in consumer_map:
12407                                                 continue
12408                                         consumers = []
12409                                         for c in linkmap.findConsumers(f):
12410                                                 # Filter out any consumers that are also preserved libs
12411                                                 # belonging to the same package as the provider.
12412                                                 if linkmap._obj_key(c) not in internal_plib_keys:
12413                                                         consumers.append(c)
12414                                         consumers.sort()
12415                                         consumer_map[f] = consumers
12416                                         search_for_owners.update(consumers[:MAX_DISPLAY+1])
12417
12418                         owners = vardbapi._owners.getFileOwnerMap(search_for_owners)
12419
12420                 for cpv in plibdata:
12421                         print colorize("WARN", ">>>") + " package: %s" % cpv
12422                         samefile_map = {}
12423                         for f in plibdata[cpv]:
12424                                 obj_key = linkmap._obj_key(f)
12425                                 alt_paths = samefile_map.get(obj_key)
12426                                 if alt_paths is None:
12427                                         alt_paths = set()
12428                                         samefile_map[obj_key] = alt_paths
12429                                 alt_paths.add(f)
12430
12431                         for alt_paths in samefile_map.itervalues():
12432                                 alt_paths = sorted(alt_paths)
12433                                 for p in alt_paths:
12434                                         print colorize("WARN", " * ") + " - %s" % (p,)
12435                                 f = alt_paths[0]
12436                                 consumers = consumer_map.get(f, [])
12437                                 for c in consumers[:MAX_DISPLAY]:
12438                                         print colorize("WARN", " * ") + "     used by %s (%s)" % \
12439                                                 (c, ", ".join(x.mycpv for x in owners.get(c, [])))
12440                                 if len(consumers) == MAX_DISPLAY + 1:
12441                                         print colorize("WARN", " * ") + "     used by %s (%s)" % \
12442                                                 (consumers[MAX_DISPLAY], ", ".join(x.mycpv \
12443                                                 for x in owners.get(consumers[MAX_DISPLAY], [])))
12444                                 elif len(consumers) > MAX_DISPLAY:
12445                                         print colorize("WARN", " * ") + "     used by %d other files" % (len(consumers) - MAX_DISPLAY)
12446                 print "Use " + colorize("GOOD", "emerge @preserved-rebuild") + " to rebuild packages using these libraries"
12447
12448
12449 def _flush_elog_mod_echo():
12450         """
12451         Dump the mod_echo output now so that our other
12452         notifications are shown last.
12453         @rtype: bool
12454         @returns: True if messages were shown, False otherwise.
12455         """
12456         messages_shown = False
12457         try:
12458                 from portage.elog import mod_echo
12459         except ImportError:
12460                 pass # happens during downgrade to a version without the module
12461         else:
12462                 messages_shown = bool(mod_echo._items)
12463                 mod_echo.finalize()
12464         return messages_shown
12465
12466 def post_emerge(root_config, myopts, mtimedb, retval):
12467         """
12468         Misc. things to run at the end of a merge session.
12469         
12470         Update Info Files
12471         Update Config Files
12472         Update News Items
12473         Commit mtimeDB
12474         Display preserved libs warnings
12475         Exit Emerge
12476
12477         @param root_config: The RootConfig of the target ROOT, providing its package databases
12478         @type root_config: RootConfig
12479         @param mtimedb: The mtimeDB to store data needed across merge invocations
12480         @type mtimedb: MtimeDB class instance
12481         @param retval: Emerge's return value
12482         @type retval: Int
12483         @rtype: None
12484         @returns:
12485         1.  Calls sys.exit(retval)
12486         """
12487
12488         target_root = root_config.root
12489         trees = { target_root : root_config.trees }
12490         vardbapi = trees[target_root]["vartree"].dbapi
12491         settings = vardbapi.settings
12492         info_mtimes = mtimedb["info"]
12493
12494         # Load the most current variables from ${ROOT}/etc/profile.env
12495         settings.unlock()
12496         settings.reload()
12497         settings.regenerate()
12498         settings.lock()
12499
12500         config_protect = settings.get("CONFIG_PROTECT","").split()
12501         infodirs = settings.get("INFOPATH","").split(":") + \
12502                 settings.get("INFODIR","").split(":")
12503
12504         os.chdir("/")
12505
12506         if retval == os.EX_OK:
12507                 exit_msg = " *** exiting successfully."
12508         else:
12509                 exit_msg = " *** exiting unsuccessfully with status '%s'." % retval
12510         emergelog("notitles" not in settings.features, exit_msg)
12511
12512         _flush_elog_mod_echo()
12513
12514         counter_hash = settings.get("PORTAGE_COUNTER_HASH")
12515         if "--pretend" in myopts or (counter_hash is not None and \
12516                 counter_hash == vardbapi._counter_hash()):
12517                 display_news_notification(root_config, myopts)
12518                 # If vdb state has not changed then there's nothing else to do.
12519                 sys.exit(retval)
12520
12521         vdb_path = os.path.join(target_root, portage.VDB_PATH)
12522         portage.util.ensure_dirs(vdb_path)
12523         vdb_lock = None
12524         if os.access(vdb_path, os.W_OK) and "--pretend" not in myopts:
12525                 vdb_lock = portage.locks.lockdir(vdb_path)
12526
12527         if vdb_lock:
12528                 try:
12529                         if "noinfo" not in settings.features:
12530                                 chk_updated_info_files(target_root,
12531                                         infodirs, info_mtimes, retval)
12532                         mtimedb.commit()
12533                 finally:
12534                         if vdb_lock:
12535                                 portage.locks.unlockdir(vdb_lock)
12536
12537         chk_updated_cfg_files(target_root, config_protect)
12538         
12539         display_news_notification(root_config, myopts)
12540         if retval in (None, os.EX_OK) or "--pretend" not in myopts:
12541                 display_preserved_libs(vardbapi)
12542
12543         sys.exit(retval)
12544
12545
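      # Scan each CONFIG_PROTECT entry under target_root for pending ._cfg????_*
      # update candidates and point the user at the CONFIGURATION FILES section
      # of the emerge man page.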
12546 def chk_updated_cfg_files(target_root, config_protect):
12547         if config_protect:
12548                 # Number of directories that contain config files needing updates.
12549                 procount=0
12550                 for x in config_protect:
12551                         x = os.path.join(target_root, x.lstrip(os.path.sep))
12552                         if not os.access(x, os.W_OK):
12553                                 # Avoid Permission denied errors generated
12554                                 # later by `find`.
12555                                 continue
12556                         try:
12557                                 mymode = os.lstat(x).st_mode
12558                         except OSError:
12559                                 continue
12560                         if stat.S_ISLNK(mymode):
12561                                 # We want to treat it like a directory if it
12562                                 # is a symlink to an existing directory.
12563                                 try:
12564                                         real_mode = os.stat(x).st_mode
12565                                         if stat.S_ISDIR(real_mode):
12566                                                 mymode = real_mode
12567                                 except OSError:
12568                                         pass
12569                         if stat.S_ISDIR(mymode):
12570                                 mycommand = "find '%s' -name '.*' -type d -prune -o -name '._cfg????_*'" % x
12571                         else:
12572                                 mycommand = "find '%s' -maxdepth 1 -name '._cfg????_%s'" % \
12573                                         os.path.split(x.rstrip(os.path.sep))
12574                         mycommand += " ! -name '.*~' ! -iname '.*.bak' -print0"
12575                         a = commands.getstatusoutput(mycommand)
12576                         if a[0] != 0:
12577                                 sys.stderr.write(" %s error scanning '%s': " % (bad("*"), x))
12578                                 sys.stderr.flush()
12579                                 # Show the error message alone, sending stdout to /dev/null.
12580                                 os.system(mycommand + " 1>/dev/null")
12581                         else:
12582                                 files = a[1].split('\0')
12583                                 # split always produces an empty string as the last element
12584                                 if files and not files[-1]:
12585                                         del files[-1]
12586                                 if files:
12587                                         procount += 1
12588                                         print "\n"+colorize("WARN", " * IMPORTANT:"),
12589                                         if stat.S_ISDIR(mymode):
12590                                                  print "%d config files in '%s' need updating." % \
12591                                                         (len(files), x)
12592                                         else:
12593                                                  print "config file '%s' needs updating." % x
12594
12595                 if procount:
12596                         print " "+yellow("*")+" See the "+colorize("INFORM","CONFIGURATION FILES")+ \
12597                                 " section of the " + bold("emerge")
12598                         print " "+yellow("*")+" man page to learn how to update config files."
12599
12600 def checkUpdatedNewsItems(portdb, vardb, NEWS_PATH, UNREAD_PATH, repo_id,
12601         update=False):
12602         """
12603         Examines news items in repodir + '/' + NEWS_PATH and attempts to find unread items.
12604         Returns the number of unread (yet relevant) items.
12605         
12606         @param portdb: a portage tree database
12607         @type portdb: portdbapi
12608         @param vardb: an installed package database
12609         @type vardb: vardbapi
12610         @param NEWS_PATH:
12611         @type NEWS_PATH:
12612         @param UNREAD_PATH:
12613         @type UNREAD_PATH:
12614         @param repo_id:
12615         @type repo_id:
12616         @rtype: Integer
12617         @returns:
12618         1.  The number of unread but relevant news items.
12619         
12620         """
12621         from portage.news import NewsManager
12622         manager = NewsManager(portdb, vardb, NEWS_PATH, UNREAD_PATH)
12623         return manager.getUnreadItems( repo_id, update=update )
12624
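      # Splice a category in front of the package name portion of an atom, e.g.
      # insert_category_into_atom(">=foo-1.0", "sys-apps") -> ">=sys-apps/foo-1.0".
      # Returns None if the atom contains no word characters.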
12625 def insert_category_into_atom(atom, category):
12626         alphanum = re.search(r'\w', atom)
12627         if alphanum:
12628                 ret = atom[:alphanum.start()] + "%s/" % category + \
12629                         atom[alphanum.start():]
12630         else:
12631                 ret = None
12632         return ret
12633
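      # Validate a dependency atom; atoms given without a category are checked
      # by temporarily prepending a dummy "cat/" category.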
12634 def is_valid_package_atom(x):
12635         if "/" not in x:
12636                 alphanum = re.search(r'\w', x)
12637                 if alphanum:
12638                         x = x[:alphanum.start()] + "cat/" + x[alphanum.start():]
12639         return portage.isvalidatom(x)
12640
12641 def show_blocker_docs_link():
12642         print
12643         print "For more information about " + bad("Blocked Packages") + ", please refer to the following"
12644         print "section of the Gentoo Linux x86 Handbook (architecture is irrelevant):"
12645         print
12646         print "http://www.gentoo.org/doc/en/handbook/handbook-x86.xml?full=1#blocked"
12647         print
12648
12649 def show_mask_docs():
12650         print "For more information, see the MASKED PACKAGES section in the emerge"
12651         print "man page or refer to the Gentoo Handbook."
12652
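      # Synchronize PORTDIR using git, rsync or cvs, depending on the SYNC URI
      # and any VCS directories already present, then update the metadata cache
      # if applicable, apply global updates and run the user's post_sync hook.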
12653 def action_sync(settings, trees, mtimedb, myopts, myaction):
12654         xterm_titles = "notitles" not in settings.features
12655         emergelog(xterm_titles, " === sync")
12656         myportdir = settings.get("PORTDIR", None)
12657         out = portage.output.EOutput()
12658         if not myportdir:
12659                 sys.stderr.write("!!! PORTDIR is undefined.  Is /etc/make.globals missing?\n")
12660                 sys.exit(1)
12661         if myportdir[-1]=="/":
12662                 myportdir=myportdir[:-1]
12663         try:
12664                 st = os.stat(myportdir)
12665         except OSError:
12666                 st = None
12667         if st is None:
12668                 print ">>>",myportdir,"not found, creating it."
12669                 os.makedirs(myportdir,0755)
12670                 st = os.stat(myportdir)
12671
12672         spawn_kwargs = {}
12673         spawn_kwargs["env"] = settings.environ()
12674         if 'usersync' in settings.features and \
12675                 portage.data.secpass >= 2 and \
12676                 (st.st_uid != os.getuid() and st.st_mode & 0700 or \
12677                 st.st_gid != os.getgid() and st.st_mode & 0070):
12678                 try:
12679                         homedir = pwd.getpwuid(st.st_uid).pw_dir
12680                 except KeyError:
12681                         pass
12682                 else:
12683                         # Drop privileges when syncing, in order to match
12684                         # existing uid/gid settings.
12685                         spawn_kwargs["uid"]    = st.st_uid
12686                         spawn_kwargs["gid"]    = st.st_gid
12687                         spawn_kwargs["groups"] = [st.st_gid]
12688                         spawn_kwargs["env"]["HOME"] = homedir
12689                         umask = 0002
12690                         if not st.st_mode & 0020:
12691                                 umask = umask | 0020
12692                         spawn_kwargs["umask"] = umask
12693
12694         syncuri = settings.get("SYNC", "").strip()
12695         if not syncuri:
12696                 writemsg_level("!!! SYNC is undefined. Is /etc/make.globals missing?\n",
12697                         noiselevel=-1, level=logging.ERROR)
12698                 return 1
12699
12700         vcs_dirs = frozenset([".git", ".svn", "CVS", ".hg"])
12701         vcs_dirs = vcs_dirs.intersection(os.listdir(myportdir))
12702
12703         os.umask(0022)
12704         dosyncuri = syncuri
12705         updatecache_flg = False
12706         if myaction == "metadata":
12707                 print "skipping sync"
12708                 updatecache_flg = True
12709         elif ".git" in vcs_dirs:
12710                 # Update existing git repository, and ignore the syncuri. We are
12711                 # going to trust the user and assume that the user is in the branch
12712                 # that he/she wants updated. We'll let the user manage branches with
12713                 # git directly.
12714                 if portage.process.find_binary("git") is None:
12715                         msg = ["Command not found: git",
12716                         "Type \"emerge dev-util/git\" to enable git support."]
12717                         for l in msg:
12718                                 writemsg_level("!!! %s\n" % l,
12719                                         level=logging.ERROR, noiselevel=-1)
12720                         return 1
12721                 msg = ">>> Starting git pull in %s..." % myportdir
12722                 emergelog(xterm_titles, msg )
12723                 writemsg_level(msg + "\n")
12724                 exitcode = portage.process.spawn_bash("cd %s ; git pull" % \
12725                         (portage._shell_quote(myportdir),), **spawn_kwargs)
12726                 if exitcode != os.EX_OK:
12727                         msg = "!!! git pull error in %s." % myportdir
12728                         emergelog(xterm_titles, msg)
12729                         writemsg_level(msg + "\n", level=logging.ERROR, noiselevel=-1)
12730                         return exitcode
12731                 msg = ">>> Git pull in %s successful" % myportdir
12732                 emergelog(xterm_titles, msg)
12733                 writemsg_level(msg + "\n")
12734                 exitcode = git_sync_timestamps(settings, myportdir)
12735                 if exitcode == os.EX_OK:
12736                         updatecache_flg = True
12737         elif syncuri[:8]=="rsync://":
12738                 for vcs_dir in vcs_dirs:
12739                         writemsg_level(("!!! %s appears to be under revision " + \
12740                                 "control (contains %s).\n!!! Aborting rsync sync.\n") % \
12741                                 (myportdir, vcs_dir), level=logging.ERROR, noiselevel=-1)
12742                         return 1
12743                 if not os.path.exists("/usr/bin/rsync"):
12744                         print "!!! /usr/bin/rsync does not exist, so rsync support is disabled."
12745                         print "!!! Type \"emerge net-misc/rsync\" to enable rsync support."
12746                         sys.exit(1)
12747                 mytimeout=180
12748
12749                 rsync_opts = []
12750                 if settings["PORTAGE_RSYNC_OPTS"] == "":
12751                         portage.writemsg("PORTAGE_RSYNC_OPTS empty or unset, using hardcoded defaults\n")
12752                         rsync_opts.extend([
12753                                 "--recursive",    # Recurse directories
12754                                 "--links",        # Consider symlinks
12755                                 "--safe-links",   # Ignore links outside of tree
12756                                 "--perms",        # Preserve permissions
12757                                 "--times",        # Preserve mod times
12758                                 "--compress",     # Compress the data transmitted
12759                                 "--force",        # Force deletion on non-empty dirs
12760                                 "--whole-file",   # Don't do block transfers, only entire files
12761                                 "--delete",       # Delete files that aren't in the master tree
12762                                 "--stats",        # Show final statistics about what was transferred
12763                                 "--timeout="+str(mytimeout), # IO timeout if not done in X seconds
12764                                 "--exclude=/distfiles",   # Exclude distfiles from consideration
12765                                 "--exclude=/local",       # Exclude local     from consideration
12766                                 "--exclude=/packages",    # Exclude packages  from consideration
12767                         ])
12768
12769                 else:
12770                         # The below validation is not needed when using the above hardcoded
12771                         # defaults.
12772
12773                         portage.writemsg("Using PORTAGE_RSYNC_OPTS instead of hardcoded defaults\n", 1)
12774                         rsync_opts.extend(
12775                                 shlex.split(settings.get("PORTAGE_RSYNC_OPTS","")))
12776                         for opt in ("--recursive", "--times"):
12777                                 if opt not in rsync_opts:
12778                                         portage.writemsg(yellow("WARNING:") + " adding required option " + \
12779                                         "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
12780                                         rsync_opts.append(opt)
12781         
12782                         for exclude in ("distfiles", "local", "packages"):
12783                                 opt = "--exclude=/%s" % exclude
12784                                 if opt not in rsync_opts:
12785                                         portage.writemsg(yellow("WARNING:") + \
12786                                         " adding required option %s not included in "  % opt + \
12787                                         "PORTAGE_RSYNC_OPTS (can be overridden with --exclude='!')\n")
12788                                         rsync_opts.append(opt)
12789         
12790                         if syncuri.rstrip("/").endswith(".gentoo.org/gentoo-portage"):
12791                                 def rsync_opt_startswith(opt_prefix):
12792                                         for x in rsync_opts:
12793                                                 if x.startswith(opt_prefix):
12794                                                         return True
12795                                         return False
12796
12797                                 if not rsync_opt_startswith("--timeout="):
12798                                         rsync_opts.append("--timeout=%d" % mytimeout)
12799
12800                                 for opt in ("--compress", "--whole-file"):
12801                                         if opt not in rsync_opts:
12802                                                 portage.writemsg(yellow("WARNING:") + " adding required option " + \
12803                                                 "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
12804                                                 rsync_opts.append(opt)
12805
12806                 if "--quiet" in myopts:
12807                         rsync_opts.append("--quiet")    # Shut up a lot
12808                 else:
12809                         rsync_opts.append("--verbose")  # Print filelist
12810
12811                 if "--verbose" in myopts:
12812                         rsync_opts.append("--progress")  # Progress meter for each file
12813
12814                 if "--debug" in myopts:
12815                         rsync_opts.append("--checksum") # Force checksum on all files
12816
12817                 # Real local timestamp file.
12818                 servertimestampfile = os.path.join(
12819                         myportdir, "metadata", "timestamp.chk")
12820
12821                 content = portage.util.grabfile(servertimestampfile)
12822                 mytimestamp = 0
12823                 if content:
12824                         try:
12825                                 mytimestamp = time.mktime(time.strptime(content[0],
12826                                         "%a, %d %b %Y %H:%M:%S +0000"))
12827                         except (OverflowError, ValueError):
12828                                 pass
12829                 del content
12830
12831                 try:
12832                         rsync_initial_timeout = \
12833                                 int(settings.get("PORTAGE_RSYNC_INITIAL_TIMEOUT", "15"))
12834                 except ValueError:
12835                         rsync_initial_timeout = 15
12836
12837                 try:
12838                         maxretries=int(settings["PORTAGE_RSYNC_RETRIES"])
12839                 except SystemExit, e:
12840                         raise # Needed else can't exit
12841                 except:
12842                         maxretries=3 #default number of retries
12843
12844                 retries=0
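                      # Split the rsync URI into optional user@, hostname and :port parts so
                      # that individual mirror IP addresses can be substituted below.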
12845                 user_name, hostname, port = re.split(
12846                         "rsync://([^:/]+@)?([^:/]*)(:[0-9]+)?", syncuri, maxsplit=3)[1:4]
12847                 if port is None:
12848                         port=""
12849                 if user_name is None:
12850                         user_name=""
12851                 updatecache_flg=True
12852                 all_rsync_opts = set(rsync_opts)
12853                 extra_rsync_opts = shlex.split(
12854                         settings.get("PORTAGE_RSYNC_EXTRA_OPTS",""))
12855                 all_rsync_opts.update(extra_rsync_opts)
12856                 family = socket.AF_INET
12857                 if "-4" in all_rsync_opts or "--ipv4" in all_rsync_opts:
12858                         family = socket.AF_INET
12859                 elif socket.has_ipv6 and \
12860                         ("-6" in all_rsync_opts or "--ipv6" in all_rsync_opts):
12861                         family = socket.AF_INET6
12862                 ips=[]
12863                 SERVER_OUT_OF_DATE = -1
12864                 EXCEEDED_MAX_RETRIES = -2
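                      # Retry loop: resolve the hostname, try each address in random order,
                      # and give up after PORTAGE_RSYNC_RETRIES unsuccessful attempts.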
12865                 while (1):
12866                         if ips:
12867                                 del ips[0]
12868                         if ips==[]:
12869                                 try:
12870                                         for addrinfo in socket.getaddrinfo(
12871                                                 hostname, None, family, socket.SOCK_STREAM):
12872                                                 if socket.has_ipv6 and addrinfo[0] == socket.AF_INET6:
12873                                                         # IPv6 addresses need to be enclosed in square brackets
12874                                                         ips.append("[%s]" % addrinfo[4][0])
12875                                                 else:
12876                                                         ips.append(addrinfo[4][0])
12877                                         from random import shuffle
12878                                         shuffle(ips)
12879                                 except SystemExit, e:
12880                                         raise # Needed else can't exit
12881                                 except Exception, e:
12882                                         print "Notice:",str(e)
12883                                         dosyncuri=syncuri
12884
12885                         if ips:
12886                                 try:
12887                                         dosyncuri = syncuri.replace(
12888                                                 "//" + user_name + hostname + port + "/",
12889                                                 "//" + user_name + ips[0] + port + "/", 1)
12890                                 except SystemExit, e:
12891                                         raise # Needed else can't exit
12892                                 except Exception, e:
12893                                         print "Notice:",str(e)
12894                                         dosyncuri=syncuri
12895
12896                         if (retries==0):
12897                                 if "--ask" in myopts:
12898                                         if userquery("Do you want to sync your Portage tree with the mirror at\n" + blue(dosyncuri) + bold("?"))=="No":
12899                                                 print
12900                                                 print "Quitting."
12901                                                 print
12902                                                 sys.exit(0)
12903                                 emergelog(xterm_titles, ">>> Starting rsync with " + dosyncuri)
12904                                 if "--quiet" not in myopts:
12905                                         print ">>> Starting rsync with "+dosyncuri+"..."
12906                         else:
12907                                 emergelog(xterm_titles,
12908                                         ">>> Starting retry %d of %d with %s" % \
12909                                                 (retries,maxretries,dosyncuri))
12910                                 print "\n\n>>> Starting retry %d of %d with %s" % (retries,maxretries,dosyncuri)
12911
12912                         if mytimestamp != 0 and "--quiet" not in myopts:
12913                                 print ">>> Checking server timestamp ..."
12914
12915                         rsynccommand = ["/usr/bin/rsync"] + rsync_opts + extra_rsync_opts
12916
12917                         if "--debug" in myopts:
12918                                 print rsynccommand
12919
12920                         exitcode = os.EX_OK
12921                         servertimestamp = 0
12922                         # Even if there's no timestamp available locally, fetch the
12923                         # timestamp anyway as an initial probe to verify that the server is
12924                         # responsive.  This protects us from hanging indefinitely on a
12925                         # connection attempt to an unresponsive server which rsync's
12926                         # --timeout option does not prevent.
12927                         if True:
12928                                 # Temporary file for remote server timestamp comparison.
12929                                 from tempfile import mkstemp
12930                                 fd, tmpservertimestampfile = mkstemp()
12931                                 os.close(fd)
12932                                 mycommand = rsynccommand[:]
12933                                 mycommand.append(dosyncuri.rstrip("/") + \
12934                                         "/metadata/timestamp.chk")
12935                                 mycommand.append(tmpservertimestampfile)
12936                                 content = None
12937                                 mypids = []
12938                                 try:
12939                                         def timeout_handler(signum, frame):
12940                                                 raise portage.exception.PortageException("timed out")
12941                                         signal.signal(signal.SIGALRM, timeout_handler)
12942                                         # Timeout here in case the server is unresponsive.  The
12943                                         # --timeout rsync option doesn't apply to the initial
12944                                         # connection attempt.
12945                                         if rsync_initial_timeout:
12946                                                 signal.alarm(rsync_initial_timeout)
12947                                         try:
12948                                                 mypids.extend(portage.process.spawn(
12949                                                         mycommand, env=settings.environ(), returnpid=True))
12950                                                 exitcode = os.waitpid(mypids[0], 0)[1]
12951                                                 content = portage.grabfile(tmpservertimestampfile)
12952                                         finally:
12953                                                 if rsync_initial_timeout:
12954                                                         signal.alarm(0)
12955                                                 try:
12956                                                         os.unlink(tmpservertimestampfile)
12957                                                 except OSError:
12958                                                         pass
12959                                 except portage.exception.PortageException, e:
12960                                         # timed out
12961                                         print e
12962                                         del e
12963                                         if mypids and os.waitpid(mypids[0], os.WNOHANG) == (0,0):
12964                                                 os.kill(mypids[0], signal.SIGTERM)
12965                                                 os.waitpid(mypids[0], 0)
12966                                         # This is the same code rsync uses for timeout.
12967                                         exitcode = 30
12968                                 else:
12969                                         if exitcode != os.EX_OK:
12970                                                 if exitcode & 0xff:
12971                                                         exitcode = (exitcode & 0xff) << 8
12972                                                 else:
12973                                                         exitcode = exitcode >> 8
12974                                 if mypids:
12975                                         portage.process.spawned_pids.remove(mypids[0])
12976                                 if content:
12977                                         try:
12978                                                 servertimestamp = time.mktime(time.strptime(
12979                                                         content[0], "%a, %d %b %Y %H:%M:%S +0000"))
12980                                         except (OverflowError, ValueError):
12981                                                 pass
12982                                 del mycommand, mypids, content
12983                         if exitcode == os.EX_OK:
12984                                 if (servertimestamp != 0) and (servertimestamp == mytimestamp):
12985                                         emergelog(xterm_titles,
12986                                                 ">>> Cancelling sync -- Already current.")
12987                                         print
12988                                         print ">>>"
12989                                         print ">>> Timestamps on the server and in the local repository are the same."
12990                                         print ">>> Cancelling all further sync action. You are already up to date."
12991                                         print ">>>"
12992                                         print ">>> In order to force sync, remove '%s'." % servertimestampfile
12993                                         print ">>>"
12994                                         print
12995                                         sys.exit(0)
12996                                 elif (servertimestamp != 0) and (servertimestamp < mytimestamp):
12997                                         emergelog(xterm_titles,
12998                                                 ">>> Server out of date: %s" % dosyncuri)
12999                                         print
13000                                         print ">>>"
13001                                         print ">>> SERVER OUT OF DATE: %s" % dosyncuri
13002                                         print ">>>"
13003                                         print ">>> In order to force sync, remove '%s'." % servertimestampfile
13004                                         print ">>>"
13005                                         print
13006                                         exitcode = SERVER_OUT_OF_DATE
13007                                 elif (servertimestamp == 0) or (servertimestamp > mytimestamp):
13008                                         # actual sync
13009                                         mycommand = rsynccommand + [dosyncuri+"/", myportdir]
13010                                         exitcode = portage.process.spawn(mycommand, **spawn_kwargs)
13011                                         if exitcode in [0,1,3,4,11,14,20,21]:
13012                                                 break
13013                         elif exitcode in [1,3,4,11,14,20,21]:
13014                                 break
13015                         else:
13016                                 # Code 2 indicates protocol incompatibility, which is expected
13017                                 # for servers with protocol < 29 that don't support
13018                                 # --prune-empty-directories.  Retry for a server that supports
13019                                 # at least rsync protocol version 29 (>=rsync-2.6.4).
13020                                 pass
13021
13022                         retries=retries+1
13023
13024                         if retries<=maxretries:
13025                                 print ">>> Retrying..."
13026                                 time.sleep(11)
13027                         else:
13028                                 # Retry limit exceeded; give up and
13029                                 # exit the loop.
13030                                 updatecache_flg=False
13031                                 exitcode = EXCEEDED_MAX_RETRIES
13032                                 break
13033
13034                 if (exitcode==0):
13035                         emergelog(xterm_titles, "=== Sync completed with %s" % dosyncuri)
13036                 elif exitcode == SERVER_OUT_OF_DATE:
13037                         sys.exit(1)
13038                 elif exitcode == EXCEEDED_MAX_RETRIES:
13039                         sys.stderr.write(
13040                                 ">>> Exceeded PORTAGE_RSYNC_RETRIES: %s\n" % maxretries)
13041                         sys.exit(1)
13042                 elif (exitcode>0):
13043                         msg = []
13044                         if exitcode==1:
13045                                 msg.append("Rsync has reported that there is a syntax error. Please ensure")
13046                                 msg.append("that your SYNC statement is correct.")
13047                                 msg.append("SYNC=" + settings["SYNC"])
13048                         elif exitcode==11:
13049                                 msg.append("Rsync has reported that there is a file I/O error. Normally")
13050                                 msg.append("this means your disk is full, but can be caused by corruption")
13051                                 msg.append("on the filesystem that contains PORTDIR. Please investigate")
13052                                 msg.append("and try again after the problem has been fixed.")
13053                                 msg.append("PORTDIR=" + settings["PORTDIR"])
13054                         elif exitcode==20:
13055                                 msg.append("Rsync was killed before it finished.")
13056                         else:
13057                                 msg.append("Rsync has not successfully finished. It is recommended that you keep")
13058                                 msg.append("trying or that you use the 'emerge-webrsync' option if you are unable")
13059                                 msg.append("to use rsync due to firewall or other restrictions. This should be a")
13060                                 msg.append("temporary problem unless complications exist with your network")
13061                                 msg.append("(and possibly your system's filesystem) configuration.")
13062                         for line in msg:
13063                                 out.eerror(line)
13064                         sys.exit(exitcode)
13065         elif syncuri[:6]=="cvs://":
13066                 if not os.path.exists("/usr/bin/cvs"):
13067                         print "!!! /usr/bin/cvs does not exist, so CVS support is disabled."
13068                         print "!!! Type \"emerge dev-util/cvs\" to enable CVS support."
13069                         sys.exit(1)
13070                 cvsroot=syncuri[6:]
13071                 cvsdir=os.path.dirname(myportdir)
13072                 if not os.path.exists(myportdir+"/CVS"):
13073                         #initial checkout
13074                         print ">>> Starting initial cvs checkout with "+syncuri+"..."
13075                         if os.path.exists(cvsdir+"/gentoo-x86"):
13076                                 print "!!! existing",cvsdir+"/gentoo-x86 directory; exiting."
13077                                 sys.exit(1)
13078                         try:
13079                                 os.rmdir(myportdir)
13080                         except OSError, e:
13081                                 if e.errno != errno.ENOENT:
13082                                         sys.stderr.write(
13083                                                 "!!! existing '%s' directory; exiting.\n" % myportdir)
13084                                         sys.exit(1)
13085                                 del e
13086                         if portage.spawn("cd "+cvsdir+"; cvs -z0 -d "+cvsroot+" co -P gentoo-x86",settings,free=1):
13087                                 print "!!! cvs checkout error; exiting."
13088                                 sys.exit(1)
13089                         os.rename(os.path.join(cvsdir, "gentoo-x86"), myportdir)
13090                 else:
13091                         #cvs update
13092                         print ">>> Starting cvs update with "+syncuri+"..."
13093                         retval = portage.process.spawn_bash(
13094                                 "cd %s; cvs -z0 -q update -dP" % \
13095                                 (portage._shell_quote(myportdir),), **spawn_kwargs)
13096                         if retval != os.EX_OK:
13097                                 sys.exit(retval)
13098                 dosyncuri = syncuri
13099         else:
13100                 writemsg_level("!!! Unrecognized protocol: SYNC='%s'\n" % (syncuri,),
13101                         noiselevel=-1, level=logging.ERROR)
13102                 return 1
13103
13104         if updatecache_flg and  \
13105                 myaction != "metadata" and \
13106                 "metadata-transfer" not in settings.features:
13107                 updatecache_flg = False
13108
13109         # Reload the whole config from scratch.
13110         settings, trees, mtimedb = load_emerge_config(trees=trees)
13111         root_config = trees[settings["ROOT"]]["root_config"]
13112         portdb = trees[settings["ROOT"]]["porttree"].dbapi
13113
13114         if os.path.exists(myportdir+"/metadata/cache") and updatecache_flg:
13115                 action_metadata(settings, portdb, myopts)
13116
13117         if portage._global_updates(trees, mtimedb["updates"]):
13118                 mtimedb.commit()
13119                 # Reload the whole config from scratch.
13120                 settings, trees, mtimedb = load_emerge_config(trees=trees)
13121                 portdb = trees[settings["ROOT"]]["porttree"].dbapi
13122                 root_config = trees[settings["ROOT"]]["root_config"]
13123
13124         mybestpv = portdb.xmatch("bestmatch-visible",
13125                 portage.const.PORTAGE_PACKAGE_ATOM)
13126         mypvs = portage.best(
13127                 trees[settings["ROOT"]]["vartree"].dbapi.match(
13128                 portage.const.PORTAGE_PACKAGE_ATOM))
13129
13130         chk_updated_cfg_files("/", settings.get("CONFIG_PROTECT","").split())
13131
13132         if myaction != "metadata":
13133                 if os.access(portage.USER_CONFIG_PATH + "/bin/post_sync", os.X_OK):
13134                         retval = portage.process.spawn(
13135                                 [os.path.join(portage.USER_CONFIG_PATH, "bin", "post_sync"),
13136                                 dosyncuri], env=settings.environ())
13137                         if retval != os.EX_OK:
13138                                 print red(" * ")+bold("spawn failed for "+ portage.USER_CONFIG_PATH + "/bin/post_sync")
13139
13140         if mybestpv != mypvs and "--quiet" not in myopts:
13141                 print
13142                 print red(" * ")+bold("An update to portage is available.")+" It is _highly_ recommended"
13143                 print red(" * ")+"that you update portage now, before any other packages are updated."
13144                 print
13145                 print red(" * ")+"To update portage, run 'emerge portage' now."
13146                 print
13147         
13148         display_news_notification(root_config, myopts)
13149         return os.EX_OK
13150
13151 def git_sync_timestamps(settings, portdir):
13152         """
13153         Since git doesn't preserve timestamps, synchronize timestamps between
13154         entries and ebuilds/eclasses. Assume the cache has the correct timestamp
13155         for a given file as long as the file in the working tree is not modified
13156         (relative to HEAD).
13157         """
13158         cache_dir = os.path.join(portdir, "metadata", "cache")
13159         if not os.path.isdir(cache_dir):
13160                 return os.EX_OK
13161         writemsg_level(">>> Synchronizing timestamps...\n")
13162
13163         from portage.cache.cache_errors import CacheError
13164         try:
13165                 cache_db = settings.load_best_module("portdbapi.metadbmodule")(
13166                         portdir, "metadata/cache", portage.auxdbkeys[:], readonly=True)
13167         except CacheError, e:
13168                 writemsg_level("!!! Unable to instantiate cache: %s\n" % (e,),
13169                         level=logging.ERROR, noiselevel=-1)
13170                 return 1
13171
13172         ec_dir = os.path.join(portdir, "eclass")
13173         try:
13174                 ec_names = set(f[:-7] for f in os.listdir(ec_dir) \
13175                         if f.endswith(".eclass"))
13176         except OSError, e:
13177                 writemsg_level("!!! Unable to list eclasses: %s\n" % (e,),
13178                         level=logging.ERROR, noiselevel=-1)
13179                 return 1
13180
13181         args = [portage.const.BASH_BINARY, "-c",
13182                 "cd %s && git diff-index --name-only --diff-filter=M HEAD" % \
13183                 portage._shell_quote(portdir)]
13184         import subprocess
13185         proc = subprocess.Popen(args, stdout=subprocess.PIPE)
13186         modified_files = set(l.rstrip("\n") for l in proc.stdout)
13187         rval = proc.wait()
13188         if rval != os.EX_OK:
13189                 return rval
13190
13191         modified_eclasses = set(ec for ec in ec_names \
13192                 if os.path.join("eclass", ec + ".eclass") in modified_files)
13193
13194         updated_ec_mtimes = {}
13195
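              # For every cache entry whose ebuild and eclasses are unmodified
              # relative to HEAD, restore the mtimes recorded in the cache onto
              # the corresponding files in the working tree.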
13196         for cpv in cache_db:
13197                 cpv_split = portage.catpkgsplit(cpv)
13198                 if cpv_split is None:
13199                         writemsg_level("!!! Invalid cache entry: %s\n" % (cpv,),
13200                                 level=logging.ERROR, noiselevel=-1)
13201                         continue
13202
13203                 cat, pn, ver, rev = cpv_split
13204                 cat, pf = portage.catsplit(cpv)
13205                 relative_eb_path = os.path.join(cat, pn, pf + ".ebuild")
13206                 if relative_eb_path in modified_files:
13207                         continue
13208
13209                 try:
13210                         cache_entry = cache_db[cpv]
13211                         eb_mtime = cache_entry.get("_mtime_")
13212                         ec_mtimes = cache_entry.get("_eclasses_")
13213                 except KeyError:
13214                         writemsg_level("!!! Missing cache entry: %s\n" % (cpv,),
13215                                 level=logging.ERROR, noiselevel=-1)
13216                         continue
13217                 except CacheError, e:
13218                         writemsg_level("!!! Unable to access cache entry: %s %s\n" % \
13219                                 (cpv, e), level=logging.ERROR, noiselevel=-1)
13220                         continue
13221
13222                 if eb_mtime is None:
13223                         writemsg_level("!!! Missing ebuild mtime: %s\n" % (cpv,),
13224                                 level=logging.ERROR, noiselevel=-1)
13225                         continue
13226
13227                 try:
13228                         eb_mtime = long(eb_mtime)
13229                 except ValueError:
13230                         writemsg_level("!!! Invalid ebuild mtime: %s %s\n" % \
13231                                 (cpv, eb_mtime), level=logging.ERROR, noiselevel=-1)
13232                         continue
13233
13234                 if ec_mtimes is None:
13235                         writemsg_level("!!! Missing eclass mtimes: %s\n" % (cpv,),
13236                                 level=logging.ERROR, noiselevel=-1)
13237                         continue
13238
13239                 if modified_eclasses.intersection(ec_mtimes):
13240                         continue
13241
13242                 missing_eclasses = set(ec_mtimes).difference(ec_names)
13243                 if missing_eclasses:
13244                         writemsg_level("!!! Non-existent eclass(es): %s %s\n" % \
13245                                 (cpv, sorted(missing_eclasses)), level=logging.ERROR,
13246                                 noiselevel=-1)
13247                         continue
13248
13249                 eb_path = os.path.join(portdir, relative_eb_path)
13250                 try:
13251                         current_eb_mtime = long(os.stat(eb_path).st_mtime)
13252                 except OSError:
13253                         writemsg_level("!!! Missing ebuild: %s\n" % \
13254                                 (cpv,), level=logging.ERROR, noiselevel=-1)
13255                         continue
13256
13257                 inconsistent = False
13258                 for ec, (ec_path, ec_mtime) in ec_mtimes.iteritems():
13259                         updated_mtime = updated_ec_mtimes.get(ec)
13260                         if updated_mtime is not None and updated_mtime != ec_mtime:
13261                                 writemsg_level("!!! Inconsistent eclass mtime: %s %s\n" % \
13262                                         (cpv, ec), level=logging.ERROR, noiselevel=-1)
13263                                 inconsistent = True
13264                                 break
13265
13266                 if inconsistent:
13267                         continue
13268
13269                 if current_eb_mtime != eb_mtime:
13270                         os.utime(eb_path, (eb_mtime, eb_mtime))
13271
13272                 for ec, (ec_path, ec_mtime) in ec_mtimes.iteritems():
13273                         if ec in updated_ec_mtimes:
13274                                 continue
13275                         ec_path = os.path.join(ec_dir, ec + ".eclass")
13276                         current_mtime = long(os.stat(ec_path).st_mtime)
13277                         if current_mtime != ec_mtime:
13278                                 os.utime(ec_path, (ec_mtime, ec_mtime))
13279                         updated_ec_mtimes[ec] = ec_mtime
13280
13281         return os.EX_OK
13282
13283 def action_metadata(settings, portdb, myopts):
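        """
        Handle 'emerge --metadata': transfer the pregenerated metadata cache
        shipped in $PORTDIR/metadata/cache into the local depcachedir, using
        portage.cache.util.mirror_cache() together with an eclass cache so
        that stale or invalid entries are detected and skipped.
        """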
13284         portage.writemsg_stdout("\n>>> Updating Portage cache:      ")
13285         old_umask = os.umask(0002)
13286         cachedir = os.path.normpath(settings.depcachedir)
13287         if cachedir in ["/",    "/bin", "/dev",  "/etc",  "/home",
13288                                         "/lib", "/opt", "/proc", "/root", "/sbin",
13289                                         "/sys", "/tmp", "/usr",  "/var"]:
13290                 print >> sys.stderr, "!!! PORTAGE_DEPCACHEDIR IS SET TO A PRIMARY " + \
13291                         "ROOT DIRECTORY ON YOUR SYSTEM."
13292                 print >> sys.stderr, \
13293                         "!!! This is ALMOST CERTAINLY NOT what you want: '%s'" % cachedir
13294                 sys.exit(73)
13295         if not os.path.exists(cachedir):
13296                 os.mkdir(cachedir)
13297
13298         ec = portage.eclass_cache.cache(portdb.porttree_root)
13299         myportdir = os.path.realpath(settings["PORTDIR"])
13300         cm = settings.load_best_module("portdbapi.metadbmodule")(
13301                 myportdir, "metadata/cache", portage.auxdbkeys[:], readonly=True)
13302
13303         from portage.cache import util
13304
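        # percentage_noise_maker doubles as the cpv source and the progress
        # reporter for mirror_cache(): iterating it yields every cpv in the
        # tree, while update() rewrites the displayed percentage in place.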
13305         class percentage_noise_maker(util.quiet_mirroring):
13306                 def __init__(self, dbapi):
13307                         self.dbapi = dbapi
13308                         self.cp_all = dbapi.cp_all()
13309                         l = len(self.cp_all)
13310                         self.call_update_min = 100000000
13311                         self.min_cp_all = l/100.0
13312                         self.count = 1
13313                         self.pstr = ''
13314
13315                 def __iter__(self):
13316                         for x in self.cp_all:
13317                                 self.count += 1
13318                                 if self.count > self.min_cp_all:
13319                                         self.call_update_min = 0
13320                                         self.count = 0
13321                                 for y in self.dbapi.cp_list(x):
13322                                         yield y
13323                         self.call_update_min = 0
13324
13325                 def update(self, *arg):
13326                         try:
13327                                 self.pstr = int(self.pstr) + 1
13328                         except ValueError:
13329                                 self.pstr = 1
13330                         sys.stdout.write("%s%i%%" % \
13331                                 ("\b" * (len(str(self.pstr))+1), self.pstr))
13332                         sys.stdout.flush()
13333                         self.call_update_min = 10000000
13334
13335                 def finish(self, *arg):
13336                         sys.stdout.write("\b\b\b\b100%\n")
13337                         sys.stdout.flush()
13338
13339         if "--quiet" in myopts:
13340                 def quicky_cpv_generator(cp_all_list):
13341                         for x in cp_all_list:
13342                                 for y in portdb.cp_list(x):
13343                                         yield y
13344                 source = quicky_cpv_generator(portdb.cp_all())
13345                 noise_maker = portage.cache.util.quiet_mirroring()
13346         else:
13347                 noise_maker = source = percentage_noise_maker(portdb)
13348         portage.cache.util.mirror_cache(source, cm, portdb.auxdb[myportdir],
13349                 eclass_cache=ec, verbose_instance=noise_maker)
13350
13351         sys.stdout.flush()
13352         os.umask(old_umask)
13353
13354 def action_regen(settings, portdb, max_jobs, max_load):
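        """
        Handle 'emerge --regen': regenerate metadata cache entries for the
        ebuilds in the tree by delegating to MetadataRegen, which spawns the
        regeneration jobs while honoring the given max_jobs and max_load
        limits.
        """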
13355         xterm_titles = "notitles" not in settings.features
13356         emergelog(xterm_titles, " === regen")
13357         #regenerate cache entries
13358         portage.writemsg_stdout("Regenerating cache entries...\n")
13359         try:
13360                 os.close(sys.stdin.fileno())
13361         except SystemExit, e:
13362                 raise # Re-raise SystemExit, otherwise the process can't exit
13363         except:
13364                 pass
13365         sys.stdout.flush()
13366
13367         regen = MetadataRegen(portdb, max_jobs=max_jobs, max_load=max_load)
13368         regen.run()
13369
13370         portage.writemsg_stdout("done!\n")
13371         return regen.returncode
13372
13373 def action_config(settings, trees, myopts, myfiles):
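        """
        Handle 'emerge --config': resolve the given atom against installed
        packages (prompting if it is ambiguous) and run the ebuild's config
        phase, followed by a clean phase on success.
        """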
13374         if len(myfiles) != 1:
13375                 print red("!!! config can only take a single package atom at this time\n")
13376                 sys.exit(1)
13377         if not is_valid_package_atom(myfiles[0]):
13378                 portage.writemsg("!!! '%s' is not a valid package atom.\n" % myfiles[0],
13379                         noiselevel=-1)
13380                 portage.writemsg("!!! Please check ebuild(5) for full details.\n")
13381                 portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
13382                 sys.exit(1)
13383         print
13384         try:
13385                 pkgs = trees[settings["ROOT"]]["vartree"].dbapi.match(myfiles[0])
13386         except portage.exception.AmbiguousPackageName, e:
13387                 # Multiple matches thrown from cpv_expand
13388                 pkgs = e.args[0]
13389         if len(pkgs) == 0:
13390                 print "No packages found.\n"
13391                 sys.exit(0)
13392         elif len(pkgs) > 1:
13393                 if "--ask" in myopts:
13394                         options = []
13395                         print "Please select a package to configure:"
13396                         idx = 0
13397                         for pkg in pkgs:
13398                                 idx += 1
13399                                 options.append(str(idx))
13400                                 print options[-1]+") "+pkg
13401                         print "X) Cancel"
13402                         options.append("X")
13403                         idx = userquery("Selection?", options)
13404                         if idx == "X":
13405                                 sys.exit(0)
13406                         pkg = pkgs[int(idx)-1]
13407                 else:
13408                         print "The following packages are available:"
13409                         for pkg in pkgs:
13410                                 print "* "+pkg
13411                         print "\nPlease use a specific atom or the --ask option."
13412                         sys.exit(1)
13413         else:
13414                 pkg = pkgs[0]
13415
13416         print
13417         if "--ask" in myopts:
13418                 if userquery("Ready to configure "+pkg+"?") == "No":
13419                         sys.exit(0)
13420         else:
13421                 print "Configuring %s..." % pkg
13422         print
13423         ebuildpath = trees[settings["ROOT"]]["vartree"].dbapi.findname(pkg)
13424         mysettings = portage.config(clone=settings)
13425         vardb = trees[mysettings["ROOT"]]["vartree"].dbapi
13426         debug = mysettings.get("PORTAGE_DEBUG") == "1"
13427         retval = portage.doebuild(ebuildpath, "config", mysettings["ROOT"],
13428                 mysettings,
13429                 debug=debug, cleanup=True,
13430                 mydbapi=trees[settings["ROOT"]]["vartree"].dbapi, tree="vartree")
13431         if retval == os.EX_OK:
13432                 portage.doebuild(ebuildpath, "clean", mysettings["ROOT"],
13433                         mysettings, debug=debug, mydbapi=vardb, tree="vartree")
13434         print
13435
13436 def action_info(settings, trees, myopts, myfiles):
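        """
        Handle 'emerge --info': print the portage version, system settings and
        selected variables and, for any package atoms given on the command
        line, the per-package build-time settings that differ from the current
        configuration, plus the output of pkg_info() where available.
        """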
13437         print getportageversion(settings["PORTDIR"], settings["ROOT"],
13438                 settings.profile_path, settings["CHOST"],
13439                 trees[settings["ROOT"]]["vartree"].dbapi)
13440         header_width = 65
13441         header_title = "System Settings"
13442         if myfiles:
13443                 print header_width * "="
13444                 print header_title.rjust(int(header_width/2 + len(header_title)/2))
13445         print header_width * "="
13446         print "System uname: "+platform.platform(aliased=1)
13447
13448         lastSync = portage.grabfile(os.path.join(
13449                 settings["PORTDIR"], "metadata", "timestamp.chk"))
13450         print "Timestamp of tree:",
13451         if lastSync:
13452                 print lastSync[0]
13453         else:
13454                 print "Unknown"
13455
13456         output=commands.getstatusoutput("distcc --version")
13457         if not output[0]:
13458                 print str(output[1].split("\n",1)[0]),
13459                 if "distcc" in settings.features:
13460                         print "[enabled]"
13461                 else:
13462                         print "[disabled]"
13463
13464         output=commands.getstatusoutput("ccache -V")
13465         if not output[0]:
13466                 print str(output[1].split("\n",1)[0]),
13467                 if "ccache" in settings.features:
13468                         print "[enabled]"
13469                 else:
13470                         print "[disabled]"
13471
13472         myvars  = ["sys-devel/autoconf", "sys-devel/automake", "virtual/os-headers",
13473                    "sys-devel/binutils", "sys-devel/libtool",  "dev-lang/python"]
13474         myvars += portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_pkgs")
13475         myvars  = portage.util.unique_array(myvars)
13476         myvars.sort()
13477
13478         for x in myvars:
13479                 if portage.isvalidatom(x):
13480                         pkg_matches = trees["/"]["vartree"].dbapi.match(x)
13481                         pkg_matches = [portage.catpkgsplit(cpv)[1:] for cpv in pkg_matches]
13482                         pkg_matches.sort(key=cmp_sort_key(portage.pkgcmp))
13483                         pkgs = []
13484                         for pn, ver, rev in pkg_matches:
13485                                 if rev != "r0":
13486                                         pkgs.append(ver + "-" + rev)
13487                                 else:
13488                                         pkgs.append(ver)
13489                         if pkgs:
13490                                 pkgs = ", ".join(pkgs)
13491                                 print "%-20s %s" % (x+":", pkgs)
13492                 else:
13493                         print "%-20s %s" % (x+":", "[NOT VALID]")
13494
13495         libtool_vers = ",".join(trees["/"]["vartree"].dbapi.match("sys-devel/libtool"))
13496
13497         if "--verbose" in myopts:
13498                 myvars=settings.keys()
13499         else:
13500                 myvars = ['GENTOO_MIRRORS', 'CONFIG_PROTECT', 'CONFIG_PROTECT_MASK',
13501                           'PORTDIR', 'DISTDIR', 'PKGDIR', 'PORTAGE_TMPDIR',
13502                           'PORTDIR_OVERLAY', 'USE', 'CHOST', 'CFLAGS', 'CXXFLAGS',
13503                           'ACCEPT_KEYWORDS', 'SYNC', 'FEATURES', 'EMERGE_DEFAULT_OPTS']
13504
13505                 myvars.extend(portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_vars"))
13506
13507         myvars = portage.util.unique_array(myvars)
13508         unset_vars = []
13509         myvars.sort()
13510         for x in myvars:
13511                 if x in settings:
13512                         if x != "USE":
13513                                 print '%s="%s"' % (x, settings[x])
13514                         else:
13515                                 use = set(settings["USE"].split())
13516                                 use_expand = settings["USE_EXPAND"].split()
13517                                 use_expand.sort()
13518                                 for varname in use_expand:
13519                                         flag_prefix = varname.lower() + "_"
13520                                         for f in list(use):
13521                                                 if f.startswith(flag_prefix):
13522                                                         use.remove(f)
13523                                 use = list(use)
13524                                 use.sort()
13525                                 print 'USE="%s"' % " ".join(use),
13526                                 for varname in use_expand:
13527                                         myval = settings.get(varname)
13528                                         if myval:
13529                                                 print '%s="%s"' % (varname, myval),
13530                                 print
13531                 else:
13532                         unset_vars.append(x)
13533         if unset_vars:
13534                 print "Unset:  "+", ".join(unset_vars)
13535         print
13536
13537         if "--debug" in myopts:
13538                 for x in dir(portage):
13539                         module = getattr(portage, x)
13540                         if "cvs_id_string" in dir(module):
13541                                 print "%s: %s" % (str(x), str(module.cvs_id_string))
13542
13543         # See if we can find any packages installed matching the strings
13544         # passed on the command line
13545         mypkgs = []
13546         vardb = trees[settings["ROOT"]]["vartree"].dbapi
13547         portdb = trees[settings["ROOT"]]["porttree"].dbapi
13548         for x in myfiles:
13549                 mypkgs.extend(vardb.match(x))
13550
13551         # If some packages were found...
13552         if mypkgs:
13553                 # Get our global settings (we only print stuff if it varies from
13554                 # the current config)
13555                 mydesiredvars = [ 'CHOST', 'CFLAGS', 'CXXFLAGS', 'LDFLAGS' ]
13556                 auxkeys = mydesiredvars + [ "USE", "IUSE"]
13557                 global_vals = {}
13558                 pkgsettings = portage.config(clone=settings)
13559
13560                 for myvar in mydesiredvars:
13561                         global_vals[myvar] = set(settings.get(myvar, "").split())
13562
13563                 # Loop through each package
13564                 # Only print settings if they differ from global settings
13565                 header_title = "Package Settings"
13566                 print header_width * "="
13567                 print header_title.rjust(int(header_width/2 + len(header_title)/2))
13568                 print header_width * "="
13569                 from portage.output import EOutput
13570                 out = EOutput()
13571                 for pkg in mypkgs:
13572                         # Get all package specific variables
13573                         auxvalues = vardb.aux_get(pkg, auxkeys)
13574                         valuesmap = {}
13575                         for i in xrange(len(auxkeys)):
13576                                 valuesmap[auxkeys[i]] = set(auxvalues[i].split())
13577                         diff_values = {}
13578                         for myvar in mydesiredvars:
13579                                 # If the package variable doesn't match the
13580                                 # current global variable, something has changed
13581                                 # so record it in diff_values so we know to print
13582                                 if valuesmap[myvar] != global_vals[myvar]:
13583                                         diff_values[myvar] = valuesmap[myvar]
13584                         valuesmap["IUSE"] = set(filter_iuse_defaults(valuesmap["IUSE"]))
13585                         valuesmap["USE"] = valuesmap["USE"].intersection(valuesmap["IUSE"])
13586                         pkgsettings.reset()
13587                         # If a matching ebuild is no longer available in the tree, maybe it
13588                         # would make sense to compare against the flags for the best
13589                         # available version with the same slot?
13590                         mydb = None
13591                         if portdb.cpv_exists(pkg):
13592                                 mydb = portdb
13593                         pkgsettings.setcpv(pkg, mydb=mydb)
13594                         if valuesmap["IUSE"].intersection(
13595                                 pkgsettings["PORTAGE_USE"].split()) != valuesmap["USE"]:
13596                                 diff_values["USE"] = valuesmap["USE"]
13597                         # If a difference was found, print the info for
13598                         # this package.
13599                         if diff_values:
13600                                 # Print package info
13601                                 print "%s was built with the following:" % pkg
13602                                 for myvar in mydesiredvars + ["USE"]:
13603                                         if myvar in diff_values:
13604                                                 mylist = list(diff_values[myvar])
13605                                                 mylist.sort()
13606                                                 print "%s=\"%s\"" % (myvar, " ".join(mylist))
13607                                 print
13608                         print ">>> Attempting to run pkg_info() for '%s'" % pkg
13609                         ebuildpath = vardb.findname(pkg)
13610                         if not ebuildpath or not os.path.exists(ebuildpath):
13611                                 out.ewarn("No ebuild found for '%s'" % pkg)
13612                                 continue
13613                         portage.doebuild(ebuildpath, "info", pkgsettings["ROOT"],
13614                                 pkgsettings, debug=(settings.get("PORTAGE_DEBUG", "") == "1"),
13615                                 mydbapi=trees[settings["ROOT"]]["vartree"].dbapi,
13616                                 tree="vartree")
13617
13618 def action_search(root_config, myopts, myfiles, spinner):
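        """
        Handle 'emerge --search': run each search term through the search
        class and print the results.
        """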
13619         if not myfiles:
13620                 print "emerge: no search terms provided."
13621         else:
13622                 searchinstance = search(root_config,
13623                         spinner, "--searchdesc" in myopts,
13624                         "--quiet" not in myopts, "--usepkg" in myopts,
13625                         "--usepkgonly" in myopts)
13626                 for mysearch in myfiles:
13627                         try:
13628                                 searchinstance.execute(mysearch)
13629                         except re.error, comment:
13630                                 print "\n!!! Regular expression error in \"%s\": %s" % ( mysearch, comment )
13631                                 sys.exit(1)
13632                         searchinstance.output()
13633
13634 def action_depclean(settings, trees, ldpath_mtimes,
13635         myopts, action, myfiles, spinner):
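        """
        Handle 'emerge --depclean' and 'emerge --prune': build a dependency
        graph rooted at the system set and a (possibly reduced) world set,
        then unmerge installed packages that are not reachable from it,
        unless removing them would break link-level (library) consumers.
        """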
13636         # Remove packages that aren't explicitly merged and aren't required as a
13637         # dependency of another package. The world file counts as explicit.
13638
13639         # Global depclean or prune operations are not very safe when there are
13640         # missing dependencies since it's unknown how badly incomplete
13641         # the dependency graph is, and we might accidentally remove packages
13642         # that should have been pulled into the graph. On the other hand, it's
13643         # relatively safe to ignore missing deps when only asked to remove
13644         # specific packages.
13645         allow_missing_deps = len(myfiles) > 0
13646
13647         msg = []
13648         msg.append("Always study the list of packages to be cleaned for any obvious\n")
13649         msg.append("mistakes. Packages that are part of the world set will always\n")
13650         msg.append("be kept.  They can be manually added to this set with\n")
13651         msg.append(good("`emerge --noreplace <atom>`") + ".  Packages that are listed in\n")
13652         msg.append("package.provided (see portage(5)) will be removed by\n")
13653         msg.append("depclean, even if they are part of the world set.\n")
13654         msg.append("\n")
13655         msg.append("As a safety measure, depclean will not remove any packages\n")
13656         msg.append("unless *all* required dependencies have been resolved.  As a\n")
13657         msg.append("consequence, it is often necessary to run %s\n" % \
13658                 good("`emerge --update"))
13659         msg.append(good("--newuse --deep @system @world`") + \
13660                 " prior to depclean.\n")
13661
13662         if action == "depclean" and "--quiet" not in myopts and not myfiles:
13663                 portage.writemsg_stdout("\n")
13664                 for x in msg:
13665                         portage.writemsg_stdout(colorize("WARN", " * ") + x)
13666
13667         xterm_titles = "notitles" not in settings.features
13668         myroot = settings["ROOT"]
13669         root_config = trees[myroot]["root_config"]
13670         getSetAtoms = root_config.setconfig.getSetAtoms
13671         vardb = trees[myroot]["vartree"].dbapi
13672
13673         required_set_names = ("system", "world")
13674         required_sets = {}
13675         set_args = []
13676
13677         for s in required_set_names:
13678                 required_sets[s] = InternalPackageSet(
13679                         initial_atoms=getSetAtoms(s))
13680
13681
13682         # When removing packages, use a temporary version of world
13683         # which excludes packages that are intended to be eligible for
13684         # removal.
13685         world_temp_set = required_sets["world"]
13686         system_set = required_sets["system"]
13687
13688         if not system_set or not world_temp_set:
13689
13690                 if not system_set:
13691                         writemsg_level("!!! You have no system list.\n",
13692                                 level=logging.ERROR, noiselevel=-1)
13693
13694                 if not world_temp_set:
13695                         writemsg_level("!!! You have no world file.\n",
13696                                         level=logging.WARNING, noiselevel=-1)
13697
13698                 writemsg_level("!!! Proceeding is likely to " + \
13699                         "break your installation.\n",
13700                         level=logging.WARNING, noiselevel=-1)
13701                 if "--pretend" not in myopts:
13702                         countdown(int(settings["EMERGE_WARNING_DELAY"]), ">>> Depclean")
13703
13704         if action == "depclean":
13705                 emergelog(xterm_titles, " >>> depclean")
13706
13707         import textwrap
13708         args_set = InternalPackageSet()
13709         if myfiles:
13710                 for x in myfiles:
13711                         if not is_valid_package_atom(x):
13712                                 writemsg_level("!!! '%s' is not a valid package atom.\n" % x,
13713                                         level=logging.ERROR, noiselevel=-1)
13714                                 writemsg_level("!!! Please check ebuild(5) for full details.\n", level=logging.ERROR, noiselevel=-1)
13715                                 return
13716                         try:
13717                                 atom = portage.dep_expand(x, mydb=vardb, settings=settings)
13718                         except portage.exception.AmbiguousPackageName, e:
13719                                 msg = "The short ebuild name \"" + x + \
13720                                         "\" is ambiguous.  Please specify " + \
13721                                         "one of the following " + \
13722                                         "fully-qualified ebuild names instead:"
13723                                 for line in textwrap.wrap(msg, 70):
13724                                         writemsg_level("!!! %s\n" % (line,),
13725                                                 level=logging.ERROR, noiselevel=-1)
13726                                 for i in e[0]:
13727                                         writemsg_level("    %s\n" % colorize("INFORM", i),
13728                                                 level=logging.ERROR, noiselevel=-1)
13729                                 writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
13730                                 return
13731                         args_set.add(atom)
13732                 matched_packages = False
13733                 for x in args_set:
13734                         if vardb.match(x):
13735                                 matched_packages = True
13736                                 break
13737                 if not matched_packages:
13738                         writemsg_level(">>> No packages selected for removal by %s\n" % \
13739                                 action)
13740                         return
13741
13742         writemsg_level("\nCalculating dependencies  ")
13743         resolver_params = create_depgraph_params(myopts, "remove")
13744         resolver = depgraph(settings, trees, myopts, resolver_params, spinner)
13745         vardb = resolver.trees[myroot]["vartree"].dbapi
13746
13747         if action == "depclean":
13748
13749                 if args_set:
13750                         # Pull in everything that's installed but not matched
13751                         # by an argument atom since we don't want to clean any
13752                         # package if something depends on it.
13753
13754                         world_temp_set.clear()
13755                         for pkg in vardb:
13756                                 spinner.update()
13757
13758                                 try:
13759                                         if args_set.findAtomForPackage(pkg) is None:
13760                                                 world_temp_set.add("=" + pkg.cpv)
13761                                                 continue
13762                                 except portage.exception.InvalidDependString, e:
13763                                         show_invalid_depstring_notice(pkg,
13764                                                 pkg.metadata["PROVIDE"], str(e))
13765                                         del e
13766                                         world_temp_set.add("=" + pkg.cpv)
13767                                         continue
13768
13769         elif action == "prune":
13770
13771                 # Pull in everything that's installed since we don't want
13772                 # to prune a package if something depends on it.
13773                 world_temp_set.clear()
13774                 world_temp_set.update(vardb.cp_all())
13775
13776                 if not args_set:
13777
13778                         # Try to prune everything that's slotted.
13779                         for cp in vardb.cp_all():
13780                                 if len(vardb.cp_list(cp)) > 1:
13781                                         args_set.add(cp)
13782
13783                 # Remove atoms from world that match installed packages
13784                 # that are also matched by argument atoms, but do not remove
13785                 # them if they match the highest installed version.
13786                 for pkg in vardb:
13787                         spinner.update()
13788                         pkgs_for_cp = vardb.match_pkgs(pkg.cp)
13789                         if not pkgs_for_cp or pkg not in pkgs_for_cp:
13790                                 raise AssertionError("package expected in matches: " + \
13791                                         "cp = %s, cpv = %s matches = %s" % \
13792                                         (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
13793
13794                         highest_version = pkgs_for_cp[-1]
13795                         if pkg == highest_version:
13796                                 # pkg is the highest version
13797                                 world_temp_set.add("=" + pkg.cpv)
13798                                 continue
13799
13800                         if len(pkgs_for_cp) <= 1:
13801                                 raise AssertionError("more packages expected: " + \
13802                                         "cp = %s, cpv = %s matches = %s" % \
13803                                         (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
13804
13805                         try:
13806                                 if args_set.findAtomForPackage(pkg) is None:
13807                                         world_temp_set.add("=" + pkg.cpv)
13808                                         continue
13809                         except portage.exception.InvalidDependString, e:
13810                                 show_invalid_depstring_notice(pkg,
13811                                         pkg.metadata["PROVIDE"], str(e))
13812                                 del e
13813                                 world_temp_set.add("=" + pkg.cpv)
13814                                 continue
13815
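        # Seed the resolver with the system and world sets so that everything
        # they require is pulled into the dependency graph; any installed
        # package left outside the graph is a removal candidate.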
13816         set_args = {}
13817         for s, package_set in required_sets.iteritems():
13818                 set_atom = SETPREFIX + s
13819                 set_arg = SetArg(arg=set_atom, set=package_set,
13820                         root_config=resolver.roots[myroot])
13821                 set_args[s] = set_arg
13822                 for atom in set_arg.set:
13823                         resolver._dep_stack.append(
13824                                 Dependency(atom=atom, root=myroot, parent=set_arg))
13825                         resolver.digraph.add(set_arg, None)
13826
13827         success = resolver._complete_graph()
13828         writemsg_level("\b\b... done!\n")
13829
13830         resolver.display_problems()
13831
13832         if not success:
13833                 return 1
13834
13835         def unresolved_deps():
13836
13837                 unresolvable = set()
13838                 for dep in resolver._initially_unsatisfied_deps:
13839                         if isinstance(dep.parent, Package) and \
13840                                 (dep.priority > UnmergeDepPriority.SOFT):
13841                                 unresolvable.add((dep.atom, dep.parent.cpv))
13842
13843                 if not unresolvable:
13844                         return False
13845
13846                 if unresolvable and not allow_missing_deps:
13847                         prefix = bad(" * ")
13848                         msg = []
13849                         msg.append("Dependencies could not be completely resolved due to")
13850                         msg.append("the following required packages not being installed:")
13851                         msg.append("")
13852                         for atom, parent in unresolvable:
13853                                 msg.append("  %s pulled in by:" % (atom,))
13854                                 msg.append("    %s" % (parent,))
13855                                 msg.append("")
13856                         msg.append("Have you forgotten to run " + \
13857                                 good("`emerge --update --newuse --deep @system @world`") + " prior")
13858                         msg.append(("to %s? It may be necessary to manually " + \
13859                                 "uninstall packages that no longer") % action)
13860                         msg.append("exist in the portage tree since " + \
13861                                 "it may not be possible to satisfy their")
13862                         msg.append("dependencies.  Also, be aware of " + \
13863                                 "the --with-bdeps option that is documented")
13864                         msg.append("in " + good("`man emerge`") + ".")
13865                         if action == "prune":
13866                                 msg.append("")
13867                                 msg.append("If you would like to ignore " + \
13868                                         "dependencies then use %s." % good("--nodeps"))
13869                         writemsg_level("".join("%s%s\n" % (prefix, line) for line in msg),
13870                                 level=logging.ERROR, noiselevel=-1)
13871                         return True
13872                 return False
13873
13874         if unresolved_deps():
13875                 return 1
13876
13877         graph = resolver.digraph.copy()
13878         required_pkgs_total = 0
13879         for node in graph:
13880                 if isinstance(node, Package):
13881                         required_pkgs_total += 1
13882
13883         def show_parents(child_node):
13884                 parent_nodes = graph.parent_nodes(child_node)
13885                 if not parent_nodes:
13886                         # With --prune, the highest version can be pulled in without any
13887                         # real parent since all installed packages are pulled in.  In that
13888                         # case there's nothing to show here.
13889                         return
13890                 parent_strs = []
13891                 for node in parent_nodes:
13892                         parent_strs.append(str(getattr(node, "cpv", node)))
13893                 parent_strs.sort()
13894                 msg = []
13895                 msg.append("  %s pulled in by:\n" % (child_node.cpv,))
13896                 for parent_str in parent_strs:
13897                         msg.append("    %s\n" % (parent_str,))
13898                 msg.append("\n")
13899                 portage.writemsg_stdout("".join(msg), noiselevel=-1)
13900
13901         def cmp_pkg_cpv(pkg1, pkg2):
13902                 """Sort Package instances by cpv."""
13903                 if pkg1.cpv > pkg2.cpv:
13904                         return 1
13905                 elif pkg1.cpv == pkg2.cpv:
13906                         return 0
13907                 else:
13908                         return -1
13909
13910         def create_cleanlist():
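                """
                Return the installed packages (limited to those matching the
                argument atoms, if any were given) that do not appear in the
                dependency graph and are therefore candidates for removal.
                """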
13911                 pkgs_to_remove = []
13912
13913                 if action == "depclean":
13914                         if args_set:
13915
13916                                 for pkg in sorted(vardb, key=cmp_sort_key(cmp_pkg_cpv)):
13917                                         arg_atom = None
13918                                         try:
13919                                                 arg_atom = args_set.findAtomForPackage(pkg)
13920                                         except portage.exception.InvalidDependString:
13921                                                 # this error has already been displayed by now
13922                                                 continue
13923
13924                                         if arg_atom:
13925                                                 if pkg not in graph:
13926                                                         pkgs_to_remove.append(pkg)
13927                                                 elif "--verbose" in myopts:
13928                                                         show_parents(pkg)
13929
13930                         else:
13931                                 for pkg in sorted(vardb, key=cmp_sort_key(cmp_pkg_cpv)):
13932                                         if pkg not in graph:
13933                                                 pkgs_to_remove.append(pkg)
13934                                         elif "--verbose" in myopts:
13935                                                 show_parents(pkg)
13936
13937                 elif action == "prune":
13938                         # Prune really uses all installed instead of world. It's not
13939                         # a real reverse dependency so don't display it as such.
13940                         graph.remove(set_args["world"])
13941
13942                         for atom in args_set:
13943                                 for pkg in vardb.match_pkgs(atom):
13944                                         if pkg not in graph:
13945                                                 pkgs_to_remove.append(pkg)
13946                                         elif "--verbose" in myopts:
13947                                                 show_parents(pkg)
13948
13949                 if not pkgs_to_remove:
13950                         writemsg_level(
13951                                 ">>> No packages selected for removal by %s\n" % action)
13952                         if "--verbose" not in myopts:
13953                                 writemsg_level(
13954                                         ">>> To see reverse dependencies, use %s\n" % \
13955                                                 good("--verbose"))
13956                         if action == "prune":
13957                                 writemsg_level(
13958                                         ">>> To ignore dependencies, use %s\n" % \
13959                                                 good("--nodeps"))
13960
13961                 return pkgs_to_remove
13962
13963         cleanlist = create_cleanlist()
13964
13965         if len(cleanlist):
13966                 clean_set = set(cleanlist)
13967
13968                 # Check if any of these packages are the sole providers of libraries
13969                 # with consumers that have not been selected for removal. If so, these
13970                 # packages and any dependencies need to be added to the graph.
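                # In other words, a package in the clean set that still provides
                # libraries to consumers outside the clean set will be kept, and
                # its consumers will be reported as candidates for rebuilding.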
13971                 real_vardb = trees[myroot]["vartree"].dbapi
13972                 linkmap = real_vardb.linkmap
13973                 liblist = linkmap.listLibraryObjects()
13974                 consumer_cache = {}
13975                 provider_cache = {}
13976                 soname_cache = {}
13977                 consumer_map = {}
13978
13979                 writemsg_level(">>> Checking for lib consumers...\n")
13980
13981                 for pkg in cleanlist:
13982                         pkg_dblink = real_vardb._dblink(pkg.cpv)
13983                         provided_libs = set()
13984
13985                         for lib in liblist:
13986                                 if pkg_dblink.isowner(lib, myroot):
13987                                         provided_libs.add(lib)
13988
13989                         if not provided_libs:
13990                                 continue
13991
13992                         consumers = {}
13993                         for lib in provided_libs:
13994                                 lib_consumers = consumer_cache.get(lib)
13995                                 if lib_consumers is None:
13996                                         lib_consumers = linkmap.findConsumers(lib)
13997                                         consumer_cache[lib] = lib_consumers
13998                                 if lib_consumers:
13999                                         consumers[lib] = lib_consumers
14000
14001                         if not consumers:
14002                                 continue
14003
14004                         for lib, lib_consumers in consumers.items():
14005                                 for consumer_file in list(lib_consumers):
14006                                         if pkg_dblink.isowner(consumer_file, myroot):
14007                                                 lib_consumers.remove(consumer_file)
14008                                 if not lib_consumers:
14009                                         del consumers[lib]
14010
14011                         if not consumers:
14012                                 continue
14013
14014                         for lib, lib_consumers in consumers.iteritems():
14015
14016                                 soname = soname_cache.get(lib)
14017                                 if soname is None:
14018                                         soname = linkmap.getSoname(lib)
14019                                         soname_cache[lib] = soname
14020
14021                                 consumer_providers = []
14022                                 for lib_consumer in lib_consumers:
14023                                         providers = provider_cache.get(lib_consumer)
14024                                         if providers is None:
14025                                                 providers = linkmap.findProviders(lib_consumer)
14026                                                 provider_cache[lib_consumer] = providers
14027                                         if soname not in providers:
14028                                                 # Why does this happen?
14029                                                 continue
14030                                         consumer_providers.append(
14031                                                 (lib_consumer, providers[soname]))
14032
14033                                 consumers[lib] = consumer_providers
14034
14035                         consumer_map[pkg] = consumers
14036
14037                 if consumer_map:
14038
14039                         search_files = set()
14040                         for consumers in consumer_map.itervalues():
14041                                 for lib, consumer_providers in consumers.iteritems():
14042                                         for lib_consumer, providers in consumer_providers:
14043                                                 search_files.add(lib_consumer)
14044                                                 search_files.update(providers)
14045
14046                         writemsg_level(">>> Assigning files to packages...\n")
14047                         file_owners = real_vardb._owners.getFileOwnerMap(search_files)
14048
14049                         for pkg, consumers in consumer_map.items():
14050                                 for lib, consumer_providers in consumers.items():
14051                                         lib_consumers = set()
14052
14053                                         for lib_consumer, providers in consumer_providers:
14054                                                 owner_set = file_owners.get(lib_consumer)
14055                                                 provider_dblinks = set()
14056                                                 provider_pkgs = set()
14057
14058                                                 if len(providers) > 1:
14059                                                         for provider in providers:
14060                                                                 provider_set = file_owners.get(provider)
14061                                                                 if provider_set is not None:
14062                                                                         provider_dblinks.update(provider_set)
14063
14064                                                 if len(provider_dblinks) > 1:
14065                                                         for provider_dblink in provider_dblinks:
14066                                                                 pkg_key = ("installed", myroot,
14067                                                                         provider_dblink.mycpv, "nomerge")
14068                                                                 if pkg_key not in clean_set:
14069                                                                         provider_pkgs.add(vardb.get(pkg_key))
14070
14071                                                 if provider_pkgs:
14072                                                         continue
14073
14074                                                 if owner_set is not None:
14075                                                         lib_consumers.update(owner_set)
14076
14077                                         for consumer_dblink in list(lib_consumers):
14078                                                 if ("installed", myroot, consumer_dblink.mycpv,
14079                                                         "nomerge") in clean_set:
14080                                                         lib_consumers.remove(consumer_dblink)
14081                                                         continue
14082
14083                                         if lib_consumers:
14084                                                 consumers[lib] = lib_consumers
14085                                         else:
14086                                                 del consumers[lib]
14087                                 if not consumers:
14088                                         del consumer_map[pkg]
14089
14090                 if consumer_map:
14091                         # TODO: Implement a package set for rebuilding consumer packages.
14092
14093                         msg = "In order to avoid breakage of link level " + \
14094                                 "dependencies, one or more packages will not be removed. " + \
14095                                 "This can be solved by rebuilding " + \
14096                                 "the packages that pulled them in."
14097
14098                         prefix = bad(" * ")
14099                         from textwrap import wrap
14100                         writemsg_level("".join(prefix + "%s\n" % line for \
14101                                 line in wrap(msg, 70)), level=logging.WARNING, noiselevel=-1)
14102
14103                         msg = []
14104                         for pkg, consumers in consumer_map.iteritems():
14105                                 unique_consumers = set(chain(*consumers.values()))
14106                                 unique_consumers = sorted(consumer.mycpv \
14107                                         for consumer in unique_consumers)
14108                                 msg.append("")
14109                                 msg.append("  %s pulled in by:" % (pkg.cpv,))
14110                                 for consumer in unique_consumers:
14111                                         msg.append("    %s" % (consumer,))
14112                         msg.append("")
14113                         writemsg_level("".join(prefix + "%s\n" % line for line in msg),
14114                                 level=logging.WARNING, noiselevel=-1)
14115
14116                         # Add lib providers to the graph as children of lib consumers,
14117                         # and also add any dependencies pulled in by the provider.
14118                         writemsg_level(">>> Adding lib providers to graph...\n")
14119
14120                         for pkg, consumers in consumer_map.iteritems():
14121                                 for consumer_dblink in set(chain(*consumers.values())):
14122                                         consumer_pkg = vardb.get(("installed", myroot,
14123                                                 consumer_dblink.mycpv, "nomerge"))
14124                                         if not resolver._add_pkg(pkg,
14125                                                 Dependency(parent=consumer_pkg,
14126                                                 priority=UnmergeDepPriority(runtime=True),
14127                                                 root=pkg.root)):
14128                                                 resolver.display_problems()
14129                                                 return 1
14130
14131                         writemsg_level("\nCalculating dependencies  ")
14132                         success = resolver._complete_graph()
14133                         writemsg_level("\b\b... done!\n")
14134                         resolver.display_problems()
14135                         if not success:
14136                                 return 1
14137                         if unresolved_deps():
14138                                 return 1
14139
14140                         graph = resolver.digraph.copy()
14141                         required_pkgs_total = 0
14142                         for node in graph:
14143                                 if isinstance(node, Package):
14144                                         required_pkgs_total += 1
14145                         cleanlist = create_cleanlist()
14146                         if not cleanlist:
14147                                 return 0
14148                         clean_set = set(cleanlist)
14149
14150                 # Use a topological sort to create an unmerge order such that
14151                 # each package is unmerged before its dependencies. This is
14152                 # necessary to avoid breaking things that may need to run
14153                 # during pkg_prerm or pkg_postrm phases.
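                # For example, if app A RDEPENDs on lib B and both are being
                # removed, A is unmerged first so that B is still present while
                # A's pkg_prerm/pkg_postrm phases run.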
14154
14155                 # Create a new graph to account for dependencies between the
14156                 # packages being unmerged.
14157                 graph = digraph()
14158                 del cleanlist[:]
14159
14160                 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
14161                 runtime = UnmergeDepPriority(runtime=True)
14162                 runtime_post = UnmergeDepPriority(runtime_post=True)
14163                 buildtime = UnmergeDepPriority(buildtime=True)
14164                 priority_map = {
14165                         "RDEPEND": runtime,
14166                         "PDEPEND": runtime_post,
14167                         "DEPEND": buildtime,
14168                 }
14169
14170                 for node in clean_set:
14171                         graph.add(node, None)
14172                         mydeps = []
14173                         node_use = node.metadata["USE"].split()
14174                         for dep_type in dep_keys:
14175                                 depstr = node.metadata[dep_type]
14176                                 if not depstr:
14177                                         continue
14178                                 try:
14179                                         portage.dep._dep_check_strict = False
14180                                         success, atoms = portage.dep_check(depstr, None, settings,
14181                                                 myuse=node_use, trees=resolver._graph_trees,
14182                                                 myroot=myroot)
14183                                 finally:
14184                                         portage.dep._dep_check_strict = True
14185                                 if not success:
14186                                         # Ignore invalid deps of packages that will
14187                                         # be uninstalled anyway.
14188                                         continue
14189
14190                                 priority = priority_map[dep_type]
14191                                 for atom in atoms:
14192                                         if not isinstance(atom, portage.dep.Atom):
14193                                                 # Ignore invalid atoms returned from dep_check().
14194                                                 continue
14195                                         if atom.blocker:
14196                                                 continue
14197                                         matches = vardb.match_pkgs(atom)
14198                                         if not matches:
14199                                                 continue
14200                                         for child_node in matches:
14201                                                 if child_node in clean_set:
14202                                                         graph.add(child_node, node, priority=priority)
14203
14204                 ordered = True
14205                 if len(graph.order) == len(graph.root_nodes()):
14206                         # If there are no dependencies between packages
14207                         # let unmerge() group them by cat/pn.
14208                         ordered = False
14209                         cleanlist = [pkg.cpv for pkg in graph.order]
14210                 else:
14211                         # Order nodes from lowest to highest overall reference count for
14212                         # optimal root node selection.
14213                         node_refcounts = {}
14214                         for node in graph.order:
14215                                 node_refcounts[node] = len(graph.parent_nodes(node))
14216                         def cmp_reference_count(node1, node2):
14217                                 return node_refcounts[node1] - node_refcounts[node2]
14218                         graph.order.sort(key=cmp_sort_key(cmp_reference_count))
14219
14220                         ignore_priority_range = [None]
14221                         ignore_priority_range.extend(
14222                                 xrange(UnmergeDepPriority.MIN, UnmergeDepPriority.MAX + 1))
14223                         while not graph.empty():
14224                                 for ignore_priority in ignore_priority_range:
14225                                         nodes = graph.root_nodes(ignore_priority=ignore_priority)
14226                                         if nodes:
14227                                                 break
14228                                 if not nodes:
14229                                         raise AssertionError("no root nodes")
14230                                 if ignore_priority is not None:
14231                                         # Some deps have been dropped due to circular dependencies,
14232                                         # so only pop one node in order to minimize the number that
14233                                         # are dropped.
14234                                         del nodes[1:]
14235                                 for node in nodes:
14236                                         graph.remove(node)
14237                                         cleanlist.append(node.cpv)
14238
14239                 unmerge(root_config, myopts, "unmerge", cleanlist,
14240                         ldpath_mtimes, ordered=ordered)
14241
14242         if action == "prune":
14243                 return
14244
14245         if not cleanlist and "--quiet" in myopts:
14246                 return
14247
14248         print "Packages installed:   "+str(len(vardb.cpv_all()))
14249         print "Packages in world:    " + \
14250                 str(len(root_config.sets["world"].getAtoms()))
14251         print "Packages in system:   " + \
14252                 str(len(root_config.sets["system"].getAtoms()))
14253         print "Required packages:    "+str(required_pkgs_total)
14254         if "--pretend" in myopts:
14255                 print "Number to remove:     "+str(len(cleanlist))
14256         else:
14257                 print "Number removed:       "+str(len(cleanlist))
14258
14259 def resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
14260         """
14261         Construct a depgraph for the given resume list. This will raise
14262         PackageNotFound or depgraph.UnsatisfiedResumeDep when necessary.
14263         @rtype: tuple
14264         @returns: (success, depgraph, dropped_tasks)
14265         """
14266         skip_masked = True
14267         skip_unsatisfied = True
14268         mergelist = mtimedb["resume"]["mergelist"]
14269         dropped_tasks = set()
14270         while True:
14271                 mydepgraph = depgraph(settings, trees,
14272                         myopts, myparams, spinner)
14273                 try:
14274                         success = mydepgraph.loadResumeCommand(mtimedb["resume"],
14275                                 skip_masked=skip_masked)
14276                 except depgraph.UnsatisfiedResumeDep, e:
14277                         if not skip_unsatisfied:
14278                                 raise
14279
14280                         graph = mydepgraph.digraph
14281                         unsatisfied_parents = dict((dep.parent, dep.parent) \
14282                                 for dep in e.value)
14283                         traversed_nodes = set()
14284                         unsatisfied_stack = list(unsatisfied_parents)
14285                         while unsatisfied_stack:
14286                                 pkg = unsatisfied_stack.pop()
14287                                 if pkg in traversed_nodes:
14288                                         continue
14289                                 traversed_nodes.add(pkg)
14290
14291                                 # If this package was pulled in by a parent
14292                                 # package scheduled for merge, removing this
14293                                 # package may cause the parent package's
14294                                 # dependency to become unsatisfied.
14295                                 for parent_node in graph.parent_nodes(pkg):
14296                                         if not isinstance(parent_node, Package) \
14297                                                 or parent_node.operation not in ("merge", "nomerge"):
14298                                                 continue
14299                                         unsatisfied = \
14300                                                 graph.child_nodes(parent_node,
14301                                                 ignore_priority=DepPrioritySatisfiedRange.ignore_soft)
14302                                         if pkg in unsatisfied:
14303                                                 unsatisfied_parents[parent_node] = parent_node
14304                                                 unsatisfied_stack.append(parent_node)
14305
14306                         pruned_mergelist = []
14307                         for x in mergelist:
14308                                 if isinstance(x, list) and \
14309                                         tuple(x) not in unsatisfied_parents:
14310                                         pruned_mergelist.append(x)
14311
14312                         # If the mergelist doesn't shrink then this loop is infinite.
14313                         if len(pruned_mergelist) == len(mergelist):
14314                                 # This happens if a package can't be dropped because
14315                                 # it's already installed, but it has unsatisfied PDEPEND.
14316                                 raise
14317                         mergelist[:] = pruned_mergelist
14318
14319                         # Exclude installed packages that have been removed from the graph due
14320                         # to failure to build/install runtime dependencies after the dependent
14321                         # package has already been installed.
14322                         dropped_tasks.update(pkg for pkg in \
14323                                 unsatisfied_parents if pkg.operation != "nomerge")
14324                         mydepgraph.break_refs(unsatisfied_parents)
14325
14326                         del e, graph, traversed_nodes, \
14327                                 unsatisfied_parents, unsatisfied_stack
14328                         continue
14329                 else:
14330                         break
14331         return (success, mydepgraph, dropped_tasks)
14332
14333 def action_build(settings, trees, mtimedb,
14334         myopts, myaction, myfiles, spinner):
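        """
        Handle the build/merge action: validate (and, with --resume, reload)
        any saved resume data, build a dependency graph, display it when
        --pretend/--ask/--tree/--verbose call for it, then run the resulting
        merge list through the Scheduler and auto-clean if configured.
        Returns a shell-style exit code.
        """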
14335
14336         # validate the state of the resume data
14337         # so that we can make assumptions later.
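              # The expected shape (enforced by the checks below) is roughly:
              #   mtimedb["resume"] = {
              #       "mergelist": [[pkg_type, pkg_root, pkg_key, pkg_action], ...],
              #       "myopts":    dict (or list, for older versions),
              #       "favorites": list,
              #   }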
14338         for k in ("resume", "resume_backup"):
14339                 if k not in mtimedb:
14340                         continue
14341                 resume_data = mtimedb[k]
14342                 if not isinstance(resume_data, dict):
14343                         del mtimedb[k]
14344                         continue
14345                 mergelist = resume_data.get("mergelist")
14346                 if not isinstance(mergelist, list):
14347                         del mtimedb[k]
14348                         continue
14349                 for x in mergelist:
14350                         if not (isinstance(x, list) and len(x) == 4):
14351                                 continue
14352                         pkg_type, pkg_root, pkg_key, pkg_action = x
14353                         if pkg_root not in trees:
14354                                 # Current $ROOT setting differs,
14355                                 # so the list must be stale.
14356                                 mergelist = None
14357                                 break
14358                 if not mergelist:
14359                         del mtimedb[k]
14360                         continue
14361                 resume_opts = resume_data.get("myopts")
14362                 if not isinstance(resume_opts, (dict, list)):
14363                         del mtimedb[k]
14364                         continue
14365                 favorites = resume_data.get("favorites")
14366                 if not isinstance(favorites, list):
14367                         del mtimedb[k]
14368                         continue
14369
14370         resume = False
14371         if "--resume" in myopts and \
14372                 ("resume" in mtimedb or
14373                 "resume_backup" in mtimedb):
14374                 resume = True
14375                 if "resume" not in mtimedb:
14376                         mtimedb["resume"] = mtimedb["resume_backup"]
14377                         del mtimedb["resume_backup"]
14378                         mtimedb.commit()
14379                 # "myopts" is a list for backward compatibility.
14380                 resume_opts = mtimedb["resume"].get("myopts", [])
14381                 if isinstance(resume_opts, list):
14382                         resume_opts = dict((k,True) for k in resume_opts)
14383                 for opt in ("--ask", "--color", "--skipfirst", "--tree"):
14384                         resume_opts.pop(opt, None)
14385                 myopts.update(resume_opts)
14386
14387                 if "--debug" in myopts:
14388                         writemsg_level("myopts %s\n" % (myopts,))
14389
14390                 # Adjust config according to options of the command being resumed.
14391                 for myroot in trees:
14392                         mysettings = trees[myroot]["vartree"].settings
14393                         mysettings.unlock()
14394                         adjust_config(myopts, mysettings)
14395                         mysettings.lock()
14396                         del myroot, mysettings
14397
14398         ldpath_mtimes = mtimedb["ldpath"]
14399         favorites=[]
14400         merge_count = 0
14401         buildpkgonly = "--buildpkgonly" in myopts
14402         pretend = "--pretend" in myopts
14403         fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
14404         ask = "--ask" in myopts
14405         nodeps = "--nodeps" in myopts
14406         oneshot = "--oneshot" in myopts or "--onlydeps" in myopts
14407         tree = "--tree" in myopts
14408         if nodeps and tree:
14409                 tree = False
14410                 del myopts["--tree"]
14411                 portage.writemsg(colorize("WARN", " * ") + \
14412                         "--tree is broken with --nodeps. Disabling...\n")
14413         debug = "--debug" in myopts
14414         verbose = "--verbose" in myopts
14415         quiet = "--quiet" in myopts
14416         if pretend or fetchonly:
14417                 # make the mtimedb readonly
14418                 mtimedb.filename = None
14419         if '--digest' in myopts or 'digest' in settings.features:
14420                 if '--digest' in myopts:
14421                         msg = "The --digest option"
14422                 else:
14423                         msg = "The FEATURES=digest setting"
14424
14425                 msg += " can prevent corruption from being" + \
14426                         " noticed. The `repoman manifest` command is the preferred" + \
14427                         " way to generate manifests and it is capable of doing an" + \
14428                         " entire repository or category at once."
14429                 prefix = bad(" * ")
14430                 writemsg(prefix + "\n")
14431                 from textwrap import wrap
14432                 for line in wrap(msg, 72):
14433                         writemsg("%s%s\n" % (prefix, line))
14434                 writemsg(prefix + "\n")
14435
14436         if "--quiet" not in myopts and \
14437                 ("--pretend" in myopts or "--ask" in myopts or \
14438                 "--tree" in myopts or "--verbose" in myopts):
14439                 action = ""
14440                 if "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
14441                         action = "fetched"
14442                 elif "--buildpkgonly" in myopts:
14443                         action = "built"
14444                 else:
14445                         action = "merged"
14446                 if "--tree" in myopts and action != "fetched": # Tree doesn't work with fetching
14447                         print
14448                         print darkgreen("These are the packages that would be %s, in reverse order:") % action
14449                         print
14450                 else:
14451                         print
14452                         print darkgreen("These are the packages that would be %s, in order:") % action
14453                         print
14454
14455         show_spinner = "--quiet" not in myopts and "--nodeps" not in myopts
14456         if not show_spinner:
14457                 spinner.update = spinner.update_quiet
14458
14459         if resume:
14460                 favorites = mtimedb["resume"].get("favorites")
14461                 if not isinstance(favorites, list):
14462                         favorites = []
14463
14464                 if show_spinner:
14465                         print "Calculating dependencies  ",
14466                 myparams = create_depgraph_params(myopts, myaction)
14467
14468                 resume_data = mtimedb["resume"]
14469                 mergelist = resume_data["mergelist"]
14470                 if mergelist and "--skipfirst" in myopts:
14471                         for i, task in enumerate(mergelist):
14472                                 if isinstance(task, list) and \
14473                                         task and task[-1] == "merge":
14474                                         del mergelist[i]
14475                                         break
14476
14477                 success = False
14478                 mydepgraph = None
14479                 try:
14480                         success, mydepgraph, dropped_tasks = resume_depgraph(
14481                                 settings, trees, mtimedb, myopts, myparams, spinner)
14482                 except (portage.exception.PackageNotFound,
14483                         depgraph.UnsatisfiedResumeDep), e:
14484                         if isinstance(e, depgraph.UnsatisfiedResumeDep):
14485                                 mydepgraph = e.depgraph
14486                         if show_spinner:
14487                                 print
14488                         from textwrap import wrap
14489                         from portage.output import EOutput
14490                         out = EOutput()
14491
14492                         resume_data = mtimedb["resume"]
14493                         mergelist = resume_data.get("mergelist")
14494                         if not isinstance(mergelist, list):
14495                                 mergelist = []
14496                         if mergelist and debug or (verbose and not quiet):
14497                                 out.eerror("Invalid resume list:")
14498                                 out.eerror("")
14499                                 indent = "  "
14500                                 for task in mergelist:
14501                                         if isinstance(task, list):
14502                                                 out.eerror(indent + str(tuple(task)))
14503                                 out.eerror("")
14504
14505                         if isinstance(e, depgraph.UnsatisfiedResumeDep):
14506                                 out.eerror("One or more packages are either masked or " + \
14507                                         "have missing dependencies:")
14508                                 out.eerror("")
14509                                 indent = "  "
14510                                 for dep in e.value:
14511                                         if dep.atom is None:
14512                                                 out.eerror(indent + "Masked package:")
14513                                                 out.eerror(2 * indent + str(dep.parent))
14514                                                 out.eerror("")
14515                                         else:
14516                                                 out.eerror(indent + str(dep.atom) + " pulled in by:")
14517                                                 out.eerror(2 * indent + str(dep.parent))
14518                                                 out.eerror("")
14519                                 msg = "The resume list contains packages " + \
14520                                         "that are either masked or have " + \
14521                                         "unsatisfied dependencies. " + \
14522                                         "Please restart/continue " + \
14523                                         "the operation manually, or use --skipfirst " + \
14524                                         "to skip the first package in the list and " + \
14525                                         "any other packages that may be " + \
14526                                         "masked or have missing dependencies."
14527                                 for line in wrap(msg, 72):
14528                                         out.eerror(line)
14529                         elif isinstance(e, portage.exception.PackageNotFound):
14530                                 out.eerror("An expected package is " + \
14531                                         "not available: %s" % str(e))
14532                                 out.eerror("")
14533                                 msg = "The resume list contains one or more " + \
14534                                         "packages that are no longer " + \
14535                                         "available. Please restart/continue " + \
14536                                         "the operation manually."
14537                                 for line in wrap(msg, 72):
14538                                         out.eerror(line)
14539                 else:
14540                         if show_spinner:
14541                                 print "\b\b... done!"
14542
14543                 if success:
14544                         if dropped_tasks:
14545                                 portage.writemsg("!!! One or more packages have been " + \
14546                                         "dropped due to\n" + \
14547                                         "!!! masking or unsatisfied dependencies:\n\n",
14548                                         noiselevel=-1)
14549                                 for task in dropped_tasks:
14550                                         portage.writemsg("  " + str(task) + "\n", noiselevel=-1)
14551                                 portage.writemsg("\n", noiselevel=-1)
14552                         del dropped_tasks
14553                 else:
14554                         if mydepgraph is not None:
14555                                 mydepgraph.display_problems()
14556                         if not (ask or pretend):
14557                                 # delete the current list and also the backup
14558                                 # since it's probably stale too.
14559                                 for k in ("resume", "resume_backup"):
14560                                         mtimedb.pop(k, None)
14561                                 mtimedb.commit()
14562
14563                         return 1
14564         else:
14565                 if ("--resume" in myopts):
14566                         print darkgreen("emerge: It seems we have nothing to resume...")
14567                         return os.EX_OK
14568
14569                 myparams = create_depgraph_params(myopts, myaction)
14570                 if "--quiet" not in myopts and "--nodeps" not in myopts:
14571                         print "Calculating dependencies  ",
14572                         sys.stdout.flush()
14573                 mydepgraph = depgraph(settings, trees, myopts, myparams, spinner)
14574                 try:
14575                         retval, favorites = mydepgraph.select_files(myfiles)
14576                 except portage.exception.PackageNotFound, e:
14577                         portage.writemsg("\n!!! %s\n" % str(e), noiselevel=-1)
14578                         return 1
14579                 except portage.exception.PackageSetNotFound, e:
14580                         root_config = trees[settings["ROOT"]]["root_config"]
14581                         display_missing_pkg_set(root_config, e.value)
14582                         return 1
14583                 if show_spinner:
14584                         print "\b\b... done!"
14585                 if not retval:
14586                         mydepgraph.display_problems()
14587                         return 1
14588
14589         if "--pretend" not in myopts and \
14590                 ("--ask" in myopts or "--tree" in myopts or \
14591                 "--verbose" in myopts) and \
14592                 not ("--quiet" in myopts and "--ask" not in myopts):
14593                 if "--resume" in myopts:
14594                         mymergelist = mydepgraph.altlist()
14595                         if len(mymergelist) == 0:
14596                                 print colorize("INFORM", "emerge: It seems we have nothing to resume...")
14597                                 return os.EX_OK
14598                         favorites = mtimedb["resume"]["favorites"]
14599                         retval = mydepgraph.display(
14600                                 mydepgraph.altlist(reversed=tree),
14601                                 favorites=favorites)
14602                         mydepgraph.display_problems()
14603                         if retval != os.EX_OK:
14604                                 return retval
14605                         prompt="Would you like to resume merging these packages?"
14606                 else:
14607                         retval = mydepgraph.display(
14608                                 mydepgraph.altlist(reversed=("--tree" in myopts)),
14609                                 favorites=favorites)
14610                         mydepgraph.display_problems()
14611                         if retval != os.EX_OK:
14612                                 return retval
14613                         mergecount=0
14614                         for x in mydepgraph.altlist():
14615                                 if isinstance(x, Package) and x.operation == "merge":
14616                                         mergecount += 1
14617
14618                         if mergecount==0:
14619                                 sets = trees[settings["ROOT"]]["root_config"].sets
14620                                 world_candidates = None
14621                                 if "--noreplace" in myopts and \
14622                                         not oneshot and favorites:
14623                                         # Sets that are not world candidates are filtered
14624                                         # out here since the favorites list needs to be
14625                                         # complete for depgraph.loadResumeCommand() to
14626                                         # operate correctly.
14627                                         world_candidates = [x for x in favorites \
14628                                                 if not (x.startswith(SETPREFIX) and \
14629                                                 not sets[x[1:]].world_candidate)]
14630                                 if "--noreplace" in myopts and \
14631                                         not oneshot and world_candidates:
14632                                         print
14633                                         for x in world_candidates:
14634                                                 print " %s %s" % (good("*"), x)
14635                                         prompt="Would you like to add these packages to your world favorites?"
14636                                 elif settings["AUTOCLEAN"] and "yes"==settings["AUTOCLEAN"]:
14637                                         prompt="Nothing to merge; would you like to auto-clean packages?"
14638                                 else:
14639                                         print
14640                                         print "Nothing to merge; quitting."
14641                                         print
14642                                         return os.EX_OK
14643                         elif "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
14644                                 prompt="Would you like to fetch the source files for these packages?"
14645                         else:
14646                                 prompt="Would you like to merge these packages?"
14647                 print
14648                 if "--ask" in myopts and userquery(prompt) == "No":
14649                         print
14650                         print "Quitting."
14651                         print
14652                         return os.EX_OK
14653                 # Don't ask again (e.g. when auto-cleaning packages after merge)
14654                 myopts.pop("--ask", None)
14655
14656         if ("--pretend" in myopts) and not ("--fetchonly" in myopts or "--fetch-all-uri" in myopts):
14657                 if ("--resume" in myopts):
14658                         mymergelist = mydepgraph.altlist()
14659                         if len(mymergelist) == 0:
14660                                 print colorize("INFORM", "emerge: It seems we have nothing to resume...")
14661                                 return os.EX_OK
14662                         favorites = mtimedb["resume"]["favorites"]
14663                         retval = mydepgraph.display(
14664                                 mydepgraph.altlist(reversed=tree),
14665                                 favorites=favorites)
14666                         mydepgraph.display_problems()
14667                         if retval != os.EX_OK:
14668                                 return retval
14669                 else:
14670                         retval = mydepgraph.display(
14671                                 mydepgraph.altlist(reversed=("--tree" in myopts)),
14672                                 favorites=favorites)
14673                         mydepgraph.display_problems()
14674                         if retval != os.EX_OK:
14675                                 return retval
14676                         if "--buildpkgonly" in myopts:
14677                                 graph_copy = mydepgraph.digraph.clone()
14678                                 removed_nodes = set()
14679                                 for node in graph_copy:
14680                                         if not isinstance(node, Package) or \
14681                                                 node.operation == "nomerge":
14682                                                 removed_nodes.add(node)
14683                                 graph_copy.difference_update(removed_nodes)
14684                                 if not graph_copy.hasallzeros(ignore_priority = \
14685                                         DepPrioritySatisfiedRange.ignore_medium):
14686                                         print "\n!!! --buildpkgonly requires all dependencies to be merged."
14687                                         print "!!! You have to merge the dependencies before you can build this package.\n"
14688                                         return 1
14689         else:
14690                 if "--buildpkgonly" in myopts:
14691                         graph_copy = mydepgraph.digraph.clone()
14692                         removed_nodes = set()
14693                         for node in graph_copy:
14694                                 if not isinstance(node, Package) or \
14695                                         node.operation == "nomerge":
14696                                         removed_nodes.add(node)
14697                         graph_copy.difference_update(removed_nodes)
14698                         if not graph_copy.hasallzeros(ignore_priority = \
14699                                 DepPrioritySatisfiedRange.ignore_medium):
14700                                 print "\n!!! --buildpkgonly requires all dependencies to be merged."
14701                                 print "!!! Cannot merge requested packages. Merge deps and try again.\n"
14702                                 return 1
14703
14704                 if ("--resume" in myopts):
14705                         favorites=mtimedb["resume"]["favorites"]
14706                         mymergelist = mydepgraph.altlist()
14707                         mydepgraph.break_refs(mymergelist)
14708                         mergetask = Scheduler(settings, trees, mtimedb, myopts,
14709                                 spinner, mymergelist, favorites, mydepgraph.schedulerGraph())
14710                         del mydepgraph, mymergelist
14711                         clear_caches(trees)
14712
14713                         retval = mergetask.merge()
14714                         merge_count = mergetask.curval
14715                 else:
14716                         if "resume" in mtimedb and \
14717                         "mergelist" in mtimedb["resume"] and \
14718                         len(mtimedb["resume"]["mergelist"]) > 1:
14719                                 mtimedb["resume_backup"] = mtimedb["resume"]
14720                                 del mtimedb["resume"]
14721                                 mtimedb.commit()
14722                         mtimedb["resume"]={}
14723                         # Stored as a dict starting with portage-2.1.6_rc1, and supported
14724                         # by >=portage-2.1.3_rc8. Versions <portage-2.1.3_rc8 only support
14725                         # a list type for options.
14726                         mtimedb["resume"]["myopts"] = myopts.copy()
14727
14728                         # Convert Atom instances to plain str.
14729                         mtimedb["resume"]["favorites"] = [str(x) for x in favorites]
14730
14731                         pkglist = mydepgraph.altlist()
14732                         mydepgraph.saveNomergeFavorites()
14733                         mydepgraph.break_refs(pkglist)
14734                         mergetask = Scheduler(settings, trees, mtimedb, myopts,
14735                                 spinner, pkglist, favorites, mydepgraph.schedulerGraph())
14736                         del mydepgraph, pkglist
14737                         clear_caches(trees)
14738
14739                         retval = mergetask.merge()
14740                         merge_count = mergetask.curval
14741
14742                 if retval == os.EX_OK and not (buildpkgonly or fetchonly or pretend):
14743                         if "yes" == settings.get("AUTOCLEAN"):
14744                                 portage.writemsg_stdout(">>> Auto-cleaning packages...\n")
14745                                 unmerge(trees[settings["ROOT"]]["root_config"],
14746                                         myopts, "clean", [],
14747                                         ldpath_mtimes, autoclean=1)
14748                         else:
14749                                 portage.writemsg_stdout(colorize("WARN", "WARNING:")
14750                                         + " AUTOCLEAN is disabled.  This can cause serious"
14751                                         + " problems due to overlapping packages.\n")
14752                         trees[settings["ROOT"]]["vartree"].dbapi.plib_registry.pruneNonExisting()
14753
14754                 return retval
14755
14756 def multiple_actions(action1, action2):
14757         sys.stderr.write("\n!!! Multiple actions requested... Please choose one only.\n")
14758         sys.stderr.write("!!! '%s' or '%s'\n\n" % (action1, action2))
14759         sys.exit(1)
14760
14761 def insert_optional_args(args):
14762         """
14763         Parse optional arguments and insert a value if one has
14764         not been provided. This is done before feeding the args
14765         to the optparse parser since that parser does not support
14766         this feature natively.
14767         """
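              # For example, given the rewriting below:
              #   ["-j"]          -> ["--jobs", "True"]
              #   ["-j4"]         -> ["--jobs", "4"]
              #   ["--root-deps"] -> ["--root-deps", "True"]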
14768
14769         new_args = []
14770         jobs_opts = ("-j", "--jobs")
14771         root_deps_opt = '--root-deps'
14772         root_deps_choices = ('True', 'rdeps')
14773         arg_stack = args[:]
14774         arg_stack.reverse()
14775         while arg_stack:
14776                 arg = arg_stack.pop()
14777
14778                 if arg == root_deps_opt:
14779                         new_args.append(arg)
14780                         if arg_stack and arg_stack[-1] in root_deps_choices:
14781                                 new_args.append(arg_stack.pop())
14782                         else:
14783                                 # insert default argument
14784                                 new_args.append('True')
14785                         continue
14786
14787                 short_job_opt = bool("j" in arg and arg[:1] == "-" and arg[:2] != "--")
14788                 if not (short_job_opt or arg in jobs_opts):
14789                         new_args.append(arg)
14790                         continue
14791
14792                 # Insert an empty placeholder in order to
14793                 # satisfy the requirements of optparse.
14794
14795                 new_args.append("--jobs")
14796                 job_count = None
14797                 saved_opts = None
14798                 if short_job_opt and len(arg) > 2:
14799                         if arg[:2] == "-j":
14800                                 try:
14801                                         job_count = int(arg[2:])
14802                                 except ValueError:
14803                                         saved_opts = arg[2:]
14804                         else:
14805                                 job_count = "True"
14806                                 saved_opts = arg[1:].replace("j", "")
14807
14808                 if job_count is None and arg_stack:
14809                         try:
14810                                 job_count = int(arg_stack[-1])
14811                         except ValueError:
14812                                 pass
14813                         else:
14814                                 # Discard the job count from the stack
14815                                 # since we're consuming it here.
14816                                 arg_stack.pop()
14817
14818                 if job_count is None:
14819                         # unlimited number of jobs
14820                         new_args.append("True")
14821                 else:
14822                         new_args.append(str(job_count))
14823
14824                 if saved_opts is not None:
14825                         new_args.append("-" + saved_opts)
14826
14827         return new_args
14828
14829 def parse_opts(tmpcmdline, silent=False):
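        """
        Parse the emerge command line (after insert_optional_args) and return
        (myaction, myopts, myfiles): the requested action if any, a dict of
        recognized options, and the remaining arguments.
        """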
14830         myaction=None
14831         myopts = {}
14832         myfiles=[]
14833
14834         global actions, options, shortmapping
14835
14836         longopt_aliases = {"--cols":"--columns", "--skip-first":"--skipfirst"}
14837         argument_options = {
14838                 "--config-root": {
14839                         "help":"specify the location for portage configuration files",
14840                         "action":"store"
14841                 },
14842                 "--color": {
14843                         "help":"enable or disable color output",
14844                         "type":"choice",
14845                         "choices":("y", "n")
14846                 },
14847
14848                 "--jobs": {
14849
14850                         "help"   : "Specifies the number of packages to build " + \
14851                                 "simultaneously.",
14852
14853                         "action" : "store"
14854                 },
14855
14856                 "--load-average": {
14857
14858                         "help"   :"Specifies that no new builds should be started " + \
14859                                 "if there are other builds running and the load average " + \
14860                                 "is at least LOAD (a floating-point number).",
14861
14862                         "action" : "store"
14863                 },
14864
14865                 "--with-bdeps": {
14866                         "help":"include unnecessary build time dependencies",
14867                         "type":"choice",
14868                         "choices":("y", "n")
14869                 },
14870                 "--reinstall": {
14871                         "help":"specify conditions to trigger package reinstallation",
14872                         "type":"choice",
14873                         "choices":["changed-use"]
14874                 },
14875                 "--root": {
14876                         "help"   : "specify the target root filesystem for merging packages",
14877                         "action" : "store"
14878                 },
14879
14880                 "--root-deps": {
14881                         "help"    : "modify interpretation of dependencies",
14882                         "type"    : "choice",
14883                         "choices" :("True", "rdeps")
14884                 },
14885         }
14886
14887         from optparse import OptionParser
14888         parser = OptionParser()
14889         if parser.has_option("--help"):
14890                 parser.remove_option("--help")
14891
14892         for action_opt in actions:
14893                 parser.add_option("--" + action_opt, action="store_true",
14894                         dest=action_opt.replace("-", "_"), default=False)
14895         for myopt in options:
14896                 parser.add_option(myopt, action="store_true",
14897                         dest=myopt.lstrip("--").replace("-", "_"), default=False)
14898         for shortopt, longopt in shortmapping.iteritems():
14899                 parser.add_option("-" + shortopt, action="store_true",
14900                         dest=longopt.lstrip("--").replace("-", "_"), default=False)
14901         for myalias, myopt in longopt_aliases.iteritems():
14902                 parser.add_option(myalias, action="store_true",
14903                         dest=myopt.lstrip("--").replace("-", "_"), default=False)
14904
14905         for myopt, kwargs in argument_options.iteritems():
14906                 parser.add_option(myopt,
14907                         dest=myopt.lstrip("--").replace("-", "_"), **kwargs)
14908
14909         tmpcmdline = insert_optional_args(tmpcmdline)
14910
14911         myoptions, myargs = parser.parse_args(args=tmpcmdline)
14912
14913         if myoptions.root_deps == "True":
14914                 myoptions.root_deps = True
14915
14916         if myoptions.jobs:
14917                 jobs = None
14918                 if myoptions.jobs == "True":
14919                         jobs = True
14920                 else:
14921                         try:
14922                                 jobs = int(myoptions.jobs)
14923                         except ValueError:
14924                                 jobs = -1
14925
14926                 if jobs is not True and \
14927                         jobs < 1:
14928                         jobs = None
14929                         if not silent:
14930                                 writemsg("!!! Invalid --jobs parameter: '%s'\n" % \
14931                                         (myoptions.jobs,), noiselevel=-1)
14932
14933                 myoptions.jobs = jobs
14934
14935         if myoptions.load_average:
14936                 try:
14937                         load_average = float(myoptions.load_average)
14938                 except ValueError:
14939                         load_average = 0.0
14940
14941                 if load_average <= 0.0:
14942                         load_average = None
14943                         if not silent:
14944                                 writemsg("!!! Invalid --load-average parameter: '%s'\n" % \
14945                                         (myoptions.load_average,), noiselevel=-1)
14946
14947                 myoptions.load_average = load_average
14948
14949         for myopt in options:
14950                 v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"))
14951                 if v:
14952                         myopts[myopt] = True
14953
14954         for myopt in argument_options:
14955                 v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"), None)
14956                 if v is not None:
14957                         myopts[myopt] = v
14958
14959         if myoptions.searchdesc:
14960                 myoptions.search = True
14961
14962         for action_opt in actions:
14963                 v = getattr(myoptions, action_opt.replace("-", "_"))
14964                 if v:
14965                         if myaction:
14966                                 multiple_actions(myaction, action_opt)
14967                                 sys.exit(1)
14968                         myaction = action_opt
14969
14970         myfiles += myargs
14971
14972         return myaction, myopts, myfiles
14973
14974 def validate_ebuild_environment(trees):
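        """Run config validation for the vartree settings of every ROOT."""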
14975         for myroot in trees:
14976                 settings = trees[myroot]["vartree"].settings
14977                 settings.validate()
14978
14979 def clear_caches(trees):
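        """
        Drop cached data held by the portdbapi, bindbapi and vartree linkmap
        caches along with portage.dircache, then trigger garbage collection.
        """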
14980         for d in trees.itervalues():
14981                 d["porttree"].dbapi.melt()
14982                 d["porttree"].dbapi._aux_cache.clear()
14983                 d["bintree"].dbapi._aux_cache.clear()
14984                 d["bintree"].dbapi._clear_cache()
14985                 d["vartree"].dbapi.linkmap._clear_cache()
14986         portage.dircache.clear()
14987         gc.collect()
14988
14989 def load_emerge_config(trees=None):
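        """
        Build the portage trees for the configured PORTAGE_CONFIGROOT/ROOT,
        attach a RootConfig to each root, and return (settings, trees,
        mtimedb) with settings taken from the target ROOT.
        """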
14990         kwargs = {}
14991         for k, envvar in (("config_root", "PORTAGE_CONFIGROOT"), ("target_root", "ROOT")):
14992                 v = os.environ.get(envvar, None)
14993                 if v and v.strip():
14994                         kwargs[k] = v
14995         trees = portage.create_trees(trees=trees, **kwargs)
14996
14997         for root, root_trees in trees.iteritems():
14998                 settings = root_trees["vartree"].settings
14999                 setconfig = load_default_config(settings, root_trees)
15000                 root_trees["root_config"] = RootConfig(settings, root_trees, setconfig)
15001
15002         settings = trees["/"]["vartree"].settings
15003
15004         for myroot in trees:
15005                 if myroot != "/":
15006                         settings = trees[myroot]["vartree"].settings
15007                         break
15008
15009         mtimedbfile = os.path.join("/", portage.CACHE_PATH.lstrip(os.path.sep), "mtimedb")
15010         mtimedb = portage.MtimeDB(mtimedbfile)
15011         
15012         return settings, trees, mtimedb
15013
15014 def adjust_config(myopts, settings):
15015         """Make emerge specific adjustments to the config."""
15016
15017         # To enhance usability, make some vars case insensitive by forcing them to
15018         # lower case.
15019         for myvar in ("AUTOCLEAN", "NOCOLOR"):
15020                 if myvar in settings:
15021                         settings[myvar] = settings[myvar].lower()
15022                         settings.backup_changes(myvar)
15023         del myvar
15024
15025         # Kill noauto as it will break merges otherwise.
15026         if "noauto" in settings.features:
15027                 settings.features.remove('noauto')
15028                 settings['FEATURES'] = ' '.join(sorted(settings.features))
15029                 settings.backup_changes("FEATURES")
15030
15031         CLEAN_DELAY = 5
15032         try:
15033                 CLEAN_DELAY = int(settings.get("CLEAN_DELAY", str(CLEAN_DELAY)))
15034         except ValueError, e:
15035                 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
15036                 portage.writemsg("!!! Unable to parse integer: CLEAN_DELAY='%s'\n" % \
15037                         settings["CLEAN_DELAY"], noiselevel=-1)
15038         settings["CLEAN_DELAY"] = str(CLEAN_DELAY)
15039         settings.backup_changes("CLEAN_DELAY")
15040
15041         EMERGE_WARNING_DELAY = 10
15042         try:
15043                 EMERGE_WARNING_DELAY = int(settings.get(
15044                         "EMERGE_WARNING_DELAY", str(EMERGE_WARNING_DELAY)))
15045         except ValueError, e:
15046                 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
15047                 portage.writemsg("!!! Unable to parse integer: EMERGE_WARNING_DELAY='%s'\n" % \
15048                         settings["EMERGE_WARNING_DELAY"], noiselevel=-1)
15049         settings["EMERGE_WARNING_DELAY"] = str(EMERGE_WARNING_DELAY)
15050         settings.backup_changes("EMERGE_WARNING_DELAY")
15051
15052         if "--quiet" in myopts:
15053                 settings["PORTAGE_QUIET"]="1"
15054                 settings.backup_changes("PORTAGE_QUIET")
15055
15056         if "--verbose" in myopts:
15057                 settings["PORTAGE_VERBOSE"] = "1"
15058                 settings.backup_changes("PORTAGE_VERBOSE")
15059
15060         # Set so that configs will be merged regardless of remembered status
15061         if ("--noconfmem" in myopts):
15062                 settings["NOCONFMEM"]="1"
15063                 settings.backup_changes("NOCONFMEM")
15064
15065         # Set various debug markers... They should be merged somehow.
15066         PORTAGE_DEBUG = 0
15067         try:
15068                 PORTAGE_DEBUG = int(settings.get("PORTAGE_DEBUG", str(PORTAGE_DEBUG)))
15069                 if PORTAGE_DEBUG not in (0, 1):
15070                         portage.writemsg("!!! Invalid value: PORTAGE_DEBUG='%i'\n" % \
15071                                 PORTAGE_DEBUG, noiselevel=-1)
15072                         portage.writemsg("!!! PORTAGE_DEBUG must be either 0 or 1\n",
15073                                 noiselevel=-1)
15074                         PORTAGE_DEBUG = 0
15075         except ValueError, e:
15076                 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
15077                 portage.writemsg("!!! Unable to parse integer: PORTAGE_DEBUG='%s'\n" %\
15078                         settings["PORTAGE_DEBUG"], noiselevel=-1)
15079                 del e
15080         if "--debug" in myopts:
15081                 PORTAGE_DEBUG = 1
15082         settings["PORTAGE_DEBUG"] = str(PORTAGE_DEBUG)
15083         settings.backup_changes("PORTAGE_DEBUG")
15084
15085         if settings.get("NOCOLOR") not in ("yes","true"):
15086                 portage.output.havecolor = 1
15087
15088         # The explicit --color < y | n > option overrides the NOCOLOR
15089         # environment variable and stdout auto-detection.
15090         if "--color" in myopts:
15091                 if "y" == myopts["--color"]:
15092                         portage.output.havecolor = 1
15093                         settings["NOCOLOR"] = "false"
15094                 else:
15095                         portage.output.havecolor = 0
15096                         settings["NOCOLOR"] = "true"
15097                 settings.backup_changes("NOCOLOR")
15098         elif not sys.stdout.isatty() and settings.get("NOCOLOR") != "no":
15099                 portage.output.havecolor = 0
15100                 settings["NOCOLOR"] = "true"
15101                 settings.backup_changes("NOCOLOR")
15102
15103 def apply_priorities(settings):
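        """Apply PORTAGE_IONICE_COMMAND and PORTAGE_NICENESS to this process."""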
15104         ionice(settings)
15105         nice(settings)
15106
15107 def nice(settings):
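        """Renice the current process by PORTAGE_NICENESS, reporting any failure."""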
15108         try:
15109                 os.nice(int(settings.get("PORTAGE_NICENESS", "0")))
15110         except (OSError, ValueError), e:
15111                 out = portage.output.EOutput()
15112                 out.eerror("Failed to change nice value to '%s'" % \
15113                         settings["PORTAGE_NICENESS"])
15114                 out.eerror("%s\n" % str(e))
15115
15116 def ionice(settings):
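        """
        Spawn PORTAGE_IONICE_COMMAND (with ${PID} expanded to this process'
        pid) to adjust I/O priority; a missing command is silently ignored.
        """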
15117
15118         ionice_cmd = settings.get("PORTAGE_IONICE_COMMAND")
15119         if ionice_cmd:
15120                 ionice_cmd = shlex.split(ionice_cmd)
15121         if not ionice_cmd:
15122                 return
15123
15124         from portage.util import varexpand
15125         variables = {"PID" : str(os.getpid())}
15126         cmd = [varexpand(x, mydict=variables) for x in ionice_cmd]
15127
15128         try:
15129                 rval = portage.process.spawn(cmd, env=os.environ)
15130         except portage.exception.CommandNotFound:
15131                 # The OS kernel probably doesn't support ionice,
15132                 # so return silently.
15133                 return
15134
15135         if rval != os.EX_OK:
15136                 out = portage.output.EOutput()
15137                 out.eerror("PORTAGE_IONICE_COMMAND returned %d" % (rval,))
15138                 out.eerror("See the make.conf(5) man page for PORTAGE_IONICE_COMMAND usage instructions.")
15139
15140 def display_missing_pkg_set(root_config, set_name):
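        """Report that set_name does not exist and list the available sets."""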
15141
15142         msg = []
15143         msg.append(("emerge: There are no sets to satisfy '%s'. " + \
15144                 "The following sets exist:") % \
15145                 colorize("INFORM", set_name))
15146         msg.append("")
15147
15148         for s in sorted(root_config.sets):
15149                 msg.append("    %s" % s)
15150         msg.append("")
15151
15152         writemsg_level("".join("%s\n" % l for l in msg),
15153                 level=logging.ERROR, noiselevel=-1)
15154
15155 def expand_set_arguments(myfiles, myaction, root_config):
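        """
        Expand set arguments (SETPREFIX names, optional {key=value,...}
        options and the /@ -@ +@ set operators) in myfiles into package
        atoms where appropriate. Returns (newargs, retval), with newargs
        set to None when the set configuration is unusable.
        """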
15156         retval = os.EX_OK
15157         setconfig = root_config.setconfig
15158
15159         sets = setconfig.getSets()
15160
15161         # In order to know exactly which atoms/sets should be added to the
15162         # world file, the depgraph performs set expansion later. It will get
15163         # confused about where the atoms came from if it's not allowed to
15164         # expand them itself.
15165         do_not_expand = (None, )
15166         newargs = []
15167         for a in myfiles:
15168                 if a in ("system", "world"):
15169                         newargs.append(SETPREFIX+a)
15170                 else:
15171                         newargs.append(a)
15172         myfiles = newargs
15173         del newargs
15174         newargs = []
15175
15176         # separators for set arguments
15177         ARG_START = "{"
15178         ARG_END = "}"
15179
15180         # WARNING: all operators must be of equal length
15181         IS_OPERATOR = "/@"
15182         DIFF_OPERATOR = "-@"
15183         UNION_OPERATOR = "+@"
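              # For instance, assuming SETPREFIX is "@", "@world-@system" would
              # expand to the atoms of the world set minus those of the system set.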
15184         
15185         for i in range(0, len(myfiles)):
15186                 if myfiles[i].startswith(SETPREFIX):
15187                         start = 0
15188                         end = 0
15189                         x = myfiles[i][len(SETPREFIX):]
15190                         newset = ""
15191                         while x:
15192                                 start = x.find(ARG_START)
15193                                 end = x.find(ARG_END)
15194                                 if start > 0 and start < end:
15195                                         namepart = x[:start]
15196                                         argpart = x[start+1:end]
15197                                 
15198                                         # TODO: implement proper quoting
15199                                         args = argpart.split(",")
15200                                         options = {}
15201                                         for a in args:
15202                                                 if "=" in a:
15203                                                         k, v  = a.split("=", 1)
15204                                                         options[k] = v
15205                                                 else:
15206                                                         options[a] = "True"
15207                                         setconfig.update(namepart, options)
15208                                         newset += (x[:start-len(namepart)]+namepart)
15209                                         x = x[end+len(ARG_END):]
15210                                 else:
15211                                         newset += x
15212                                         x = ""
15213                         myfiles[i] = SETPREFIX+newset
15214                                 
15215         sets = setconfig.getSets()
15216
15217         # display errors that occurred while loading the SetConfig instance
15218         for e in setconfig.errors:
15219                 print colorize("BAD", "Error during set creation: %s" % e)
15220         
15221         # emerge relies on the existence of sets with names "world" and "system"
15222         required_sets = ("world", "system")
15223         missing_sets = []
15224
15225         for s in required_sets:
15226                 if s not in sets:
15227                         missing_sets.append(s)
15228         if missing_sets:
15229                 if len(missing_sets) > 2:
15230                         missing_sets_str = ", ".join('"%s"' % s for s in missing_sets[:-1])
15231                         missing_sets_str += ', and "%s"' % missing_sets[-1]
15232                 elif len(missing_sets) == 2:
15233                         missing_sets_str = '"%s" and "%s"' % tuple(missing_sets)
15234                 else:
15235                         missing_sets_str = '"%s"' % missing_sets[-1]
15236                 msg = ["emerge: incomplete set configuration, " + \
15237                         "missing set(s): %s" % missing_sets_str]
15238                 if sets:
15239                         msg.append("        sets defined: %s" % ", ".join(sets))
15240                 msg.append("        This usually means that '%s'" % \
15241                         (os.path.join(portage.const.GLOBAL_CONFIG_PATH, "sets.conf"),))
15242                 msg.append("        is missing or corrupt.")
15243                 for line in msg:
15244                         writemsg_level(line + "\n", level=logging.ERROR, noiselevel=-1)
15245                 return (None, 1)
15246         unmerge_actions = ("unmerge", "prune", "clean", "depclean")
15247
15248         for a in myfiles:
15249                 if a.startswith(SETPREFIX):
15250                         # support simple set operations (intersection, difference and union)
15251                         # on the command line. Expressions are evaluated strictly left-to-right.
15252                         if IS_OPERATOR in a or DIFF_OPERATOR in a or UNION_OPERATOR in a:
15253                                 expression = a[len(SETPREFIX):]
15254                                 expr_sets = []
15255                                 expr_ops = []
15256                                 while IS_OPERATOR in expression or DIFF_OPERATOR in expression or UNION_OPERATOR in expression:
15257                                         is_pos = expression.rfind(IS_OPERATOR)
15258                                         diff_pos = expression.rfind(DIFF_OPERATOR)
15259                                         union_pos = expression.rfind(UNION_OPERATOR)
15260                                         op_pos = max(is_pos, diff_pos, union_pos)
15261                                         s1 = expression[:op_pos]
15262                                         s2 = expression[op_pos+len(IS_OPERATOR):]
15263                                         op = expression[op_pos:op_pos+len(IS_OPERATOR)]
15264                                         if s2 not in sets:
15265                                                 display_missing_pkg_set(root_config, s2)
15266                                                 return (None, 1)
15267                                         expr_sets.insert(0, s2)
15268                                         expr_ops.insert(0, op)
15269                                         expression = s1
15270                                 if expression not in sets:
15271                                         display_missing_pkg_set(root_config, expression)
15272                                         return (None, 1)
15273                                 expr_sets.insert(0, expression)
15274                                 result = set(setconfig.getSetAtoms(expression))
15275                                 for i in range(0, len(expr_ops)):
15276                                         s2 = setconfig.getSetAtoms(expr_sets[i+1])
15277                                         if expr_ops[i] == IS_OPERATOR:
15278                                                 result.intersection_update(s2)
15279                                         elif expr_ops[i] == DIFF_OPERATOR:
15280                                                 result.difference_update(s2)
15281                                         elif expr_ops[i] == UNION_OPERATOR:
15282                                                 result.update(s2)
15283                                         else:
15284                                                 raise NotImplementedError("unknown set operator %s" % expr_ops[i])
15285                                 newargs.extend(result)
15286                         else:                   
15287                                 s = a[len(SETPREFIX):]
15288                                 if s not in sets:
15289                                         display_missing_pkg_set(root_config, s)
15290                                         return (None, 1)
15291                                 setconfig.active.append(s)
15292                                 try:
15293                                         set_atoms = setconfig.getSetAtoms(s)
15294                                 except portage.exception.PackageSetNotFound, e:
15295                                         writemsg_level(("emerge: the given set '%s' " + \
15296                                                 "contains a non-existent set named '%s'.\n") % \
15297                                                 (s, e), level=logging.ERROR, noiselevel=-1)
15298                                         return (None, 1)
15299                                 if myaction in unmerge_actions and \
15300                                                 not sets[s].supportsOperation("unmerge"):
15301                                         sys.stderr.write("emerge: the given set '%s' does " % s + \
15302                                                 "not support unmerge operations\n")
15303                                         retval = 1
15304                                 elif not set_atoms:
15305                                         print "emerge: '%s' is an empty set" % s
15306                                 elif myaction not in do_not_expand:
15307                                         newargs.extend(set_atoms)
15308                                 else:
15309                                         newargs.append(SETPREFIX+s)
15310                                 for e in sets[s].errors:
15311                                         print e
15312                 else:
15313                         newargs.append(a)
15314         return (newargs, retval)
15315
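      # Warn about configured repositories that lack a profiles/repo_name file
      # (e.g. /usr/portage/profiles/repo_name, which for the main tree typically
      # contains the single line "gentoo").  Returns True when at least one
      # repository is missing its repo_name entry.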
15316 def repo_name_check(trees):
15317         missing_repo_names = set()
15318         for root, root_trees in trees.iteritems():
15319                 if "porttree" in root_trees:
15320                         portdb = root_trees["porttree"].dbapi
15321                         missing_repo_names.update(portdb.porttrees)
15322                         repos = portdb.getRepositories()
15323                         for r in repos:
15324                                 missing_repo_names.discard(portdb.getRepositoryPath(r))
15325                         if portdb.porttree_root in missing_repo_names and \
15326                                 not os.path.exists(os.path.join(
15327                                 portdb.porttree_root, "profiles")):
15328                                 # This is normal if $PORTDIR happens to be empty,
15329                                 # so don't warn about it.
15330                                 missing_repo_names.remove(portdb.porttree_root)
15331
15332         if missing_repo_names:
15333                 msg = []
15334                 msg.append("WARNING: One or more repositories " + \
15335                         "have missing repo_name entries:")
15336                 msg.append("")
15337                 for p in missing_repo_names:
15338                         msg.append("\t%s/profiles/repo_name" % (p,))
15339                 msg.append("")
15340                 msg.extend(textwrap.wrap("NOTE: Each repo_name entry " + \
15341                         "should be a plain text file containing a unique " + \
15342                         "name for the repository on the first line.", 70))
15343                 writemsg_level("".join("%s\n" % l for l in msg),
15344                         level=logging.WARNING, noiselevel=-1)
15345
15346         return bool(missing_repo_names)
15347
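      # Warn for any root whose configuration leaves CONFIG_PROTECT empty,
      # since nothing would then be protected from being overwritten during
      # merges.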
15348 def config_protect_check(trees):
15349         for root, root_trees in trees.iteritems():
15350                 if not root_trees["root_config"].settings.get("CONFIG_PROTECT"):
15351                         msg = "!!! CONFIG_PROTECT is empty"
15352                         if root != "/":
15353                                 msg += " for '%s'" % root
15354                         writemsg_level(msg + "\n", level=logging.WARN, noiselevel=-1)
15355
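      # Report an ambiguous short ebuild name (one that exists in more than one
      # category): with --quiet just list the matching fully-qualified names,
      # otherwise reuse the search machinery to print a description of each
      # candidate.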
15356 def ambiguous_package_name(arg, atoms, root_config, spinner, myopts):
15357
15358         if "--quiet" in myopts:
15359                 print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
15360                 print "!!! one of the following fully-qualified ebuild names instead:\n"
15361                 for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
15362                         print "    " + colorize("INFORM", cp)
15363                 return
15364
15365         s = search(root_config, spinner, "--searchdesc" in myopts,
15366                 "--quiet" not in myopts, "--usepkg" in myopts,
15367                 "--usepkgonly" in myopts)
15368         null_cp = portage.dep_getkey(insert_category_into_atom(
15369                 arg, "null"))
15370         cat, atom_pn = portage.catsplit(null_cp)
15371         s.searchkey = atom_pn
15372         for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
15373                 s.addCP(cp)
15374         s.output()
15375         print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
15376         print "!!! one of the above fully-qualified ebuild names instead.\n"
15377
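      # Refuse to proceed when no profile is configured, except for the actions
      # and options (info, sync, --help, --version) that remain available so
      # the user can repair the configuration.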
15378 def profile_check(trees, myaction, myopts):
15379         if myaction in ("info", "sync"):
15380                 return os.EX_OK
15381         elif "--version" in myopts or "--help" in myopts:
15382                 return os.EX_OK
15383         for root, root_trees in trees.iteritems():
15384                 if root_trees["root_config"].settings.profiles:
15385                         continue
15386                 # generate some profile related warning messages
15387                 validate_ebuild_environment(trees)
15388                 msg = "If you have just changed your profile configuration, you " + \
15389                         "should revert to the previous configuration. Due to " + \
15390                         "your current profile being invalid, allowed actions are " + \
15391                         "limited to --help, --info, --sync, and --version."
15392                 writemsg_level("".join("!!! %s\n" % l for l in textwrap.wrap(msg, 70)),
15393                         level=logging.ERROR, noiselevel=-1)
15394                 return 1
15395         return os.EX_OK
15396
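      # Main entry point for the emerge command line: parses options, loads the
      # configuration and dispatches to the individual action handlers.  The
      # return value becomes the process exit status.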
15397 def emerge_main():
15398         global portage  # NFC why this is necessary now - genone
15399         portage._disable_legacy_globals()
15400         # Disable color until we're sure that it should be enabled (after
15401         # EMERGE_DEFAULT_OPTS has been parsed).
15402         portage.output.havecolor = 0
15403         # This first pass is just for options that need to be known as early as
15404         # possible, such as --config-root.  They will be parsed again later,
15405         # together with EMERGE_DEFAULT_OPTS (which may vary depending on
15406         # the value of --config-root).
15407         myaction, myopts, myfiles = parse_opts(sys.argv[1:], silent=True)
15408         if "--debug" in myopts:
15409                 os.environ["PORTAGE_DEBUG"] = "1"
15410         if "--config-root" in myopts:
15411                 os.environ["PORTAGE_CONFIGROOT"] = myopts["--config-root"]
15412         if "--root" in myopts:
15413                 os.environ["ROOT"] = myopts["--root"]
15414
15415         # Portage needs to ensure a sane umask for the files it creates.
15416         os.umask(022)
15417         settings, trees, mtimedb = load_emerge_config()
15418         portdb = trees[settings["ROOT"]]["porttree"].dbapi
15419         rval = profile_check(trees, myaction, myopts)
15420         if rval != os.EX_OK:
15421                 return rval
15422
15423         if portage._global_updates(trees, mtimedb["updates"]):
15424                 mtimedb.commit()
15425                 # Reload the whole config from scratch.
15426                 settings, trees, mtimedb = load_emerge_config(trees=trees)
15427                 portdb = trees[settings["ROOT"]]["porttree"].dbapi
15428
15429         xterm_titles = "notitles" not in settings.features
15430
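              # Re-parse the command line with EMERGE_DEFAULT_OPTS from make.conf
              # prepended (unless --ignore-default-opts is given), so explicit
              # command-line arguments appear after the defaults, e.g. with a
              # hypothetical EMERGE_DEFAULT_OPTS="--ask --verbose".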
15431         tmpcmdline = []
15432         if "--ignore-default-opts" not in myopts:
15433                 tmpcmdline.extend(settings["EMERGE_DEFAULT_OPTS"].split())
15434         tmpcmdline.extend(sys.argv[1:])
15435         myaction, myopts, myfiles = parse_opts(tmpcmdline)
15436
15437         if "--digest" in myopts:
15438                 os.environ["FEATURES"] = os.environ.get("FEATURES","") + " digest"
15439                 # Reload the whole config from scratch so that the portdbapi internal
15440                 # config is updated with new FEATURES.
15441                 settings, trees, mtimedb = load_emerge_config(trees=trees)
15442                 portdb = trees[settings["ROOT"]]["porttree"].dbapi
15443
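              # Unlock each root's configuration, fold the relevant command-line
              # options into it, and, for the default and unmerge-type actions
              # without --pretend, record a hash of the installed-package
              # counters; presumably so a later stage can detect whether the
              # installed set changed in the meantime.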
15444         for myroot in trees:
15445                 mysettings = trees[myroot]["vartree"].settings
15446                 mysettings.unlock()
15447                 adjust_config(myopts, mysettings)
15448                 if '--pretend' not in myopts and myaction in \
15449                         (None, 'clean', 'depclean', 'prune', 'unmerge'):
15450                         mysettings["PORTAGE_COUNTER_HASH"] = \
15451                                 trees[myroot]["vartree"].dbapi._counter_hash()
15452                         mysettings.backup_changes("PORTAGE_COUNTER_HASH")
15453                 mysettings.lock()
15454                 del myroot, mysettings
15455
15456         apply_priorities(settings)
15457
15458         spinner = stdout_spinner()
15459         if "candy" in settings.features:
15460                 spinner.update = spinner.update_scroll
15461
15462         if "--quiet" not in myopts:
15463                 portage.deprecated_profile_check(settings=settings)
15464                 repo_name_check(trees)
15465                 config_protect_check(trees)
15466
15467         for mytrees in trees.itervalues():
15468                 mydb = mytrees["porttree"].dbapi
15469                 # Freeze the portdbapi for performance (memoize all xmatch results).
15470                 mydb.freeze()
15471         del mytrees, mydb
15472
15473         if "moo" in myfiles:
15474                 print """
15475
15476   Larry loves Gentoo (""" + platform.system() + """)
15477
15478  _______________________
15479 < Have you mooed today? >
15480  -----------------------
15481         \   ^__^
15482          \  (oo)\_______
15483             (__)\       )\/\ 
15484                 ||----w |
15485                 ||     ||
15486
15487 """
15488
15489         for x in myfiles:
15490                 ext = os.path.splitext(x)[1]
15491                 if (ext == ".ebuild" or ext == ".tbz2") and os.path.exists(os.path.abspath(x)):
15492                         print colorize("BAD", "\n*** emerging by path is broken and may not always work!!!\n")
15493                         break
15494
15495         root_config = trees[settings["ROOT"]]["root_config"]
15496         if myaction == "list-sets":
15497                 sys.stdout.write("".join("%s\n" % s for s in sorted(root_config.sets)))
15498                 sys.stdout.flush()
15499                 return os.EX_OK
15500
15501         # only expand sets for actions taking package arguments
15502         oldargs = myfiles[:]
15503         if myaction in ("clean", "config", "depclean", "info", "prune", "unmerge", None):
15504                 myfiles, retval = expand_set_arguments(myfiles, myaction, root_config)
15505                 if retval != os.EX_OK:
15506                         return retval
15507
15508                 # Need to handle empty sets specially; otherwise emerge would respond
15509                 # with the help message for an empty argument list.
15510                 if oldargs and not myfiles:
15511                         print "emerge: no targets left after set expansion"
15512                         return 0
15513
15514         if ("--tree" in myopts) and ("--columns" in myopts):
15515                 print "emerge: can't specify both of \"--tree\" and \"--columns\"."
15516                 return 1
15517
15518         if ("--quiet" in myopts):
15519                 spinner.update = spinner.update_quiet
15520                 portage.util.noiselimit = -1
15521
15522         # Always create packages if FEATURES=buildpkg
15523         # Imply --buildpkg if --buildpkgonly
15524         if ("buildpkg" in settings.features) or ("--buildpkgonly" in myopts):
15525                 if "--buildpkg" not in myopts:
15526                         myopts["--buildpkg"] = True
15527
15528         # Always try to fetch binary packages if FEATURES=getbinpkg
15529         if ("getbinpkg" in settings.features):
15530                 myopts["--getbinpkg"] = True
15531
15532         if "--buildpkgonly" in myopts:
15533                 # --buildpkgonly will not merge anything, so
15534                 # it cancels all binary package options.
15535                 for opt in ("--getbinpkg", "--getbinpkgonly",
15536                         "--usepkg", "--usepkgonly"):
15537                         myopts.pop(opt, None)
15538
15539         if "--fetch-all-uri" in myopts:
15540                 myopts["--fetchonly"] = True
15541
15542         if "--skipfirst" in myopts and "--resume" not in myopts:
15543                 myopts["--resume"] = True
15544
15545         if ("--getbinpkgonly" in myopts) and not ("--usepkgonly" in myopts):
15546                 myopts["--usepkgonly"] = True
15547
15548         if ("--getbinpkgonly" in myopts) and not ("--getbinpkg" in myopts):
15549                 myopts["--getbinpkg"] = True
15550
15551         if ("--getbinpkg" in myopts) and not ("--usepkg" in myopts):
15552                 myopts["--usepkg"] = True
15553
15554         # Also allow -K to apply --usepkg/-k
15555         if ("--usepkgonly" in myopts) and not ("--usepkg" in myopts):
15556                 myopts["--usepkg"] = True
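              # Net effect of the binary-package option handling above:
              #   --getbinpkgonly  implies --usepkgonly and --getbinpkg
              #   --getbinpkg      implies --usepkg
              #   --usepkgonly     implies --usepkg
              # and FEATURES=getbinpkg turns on --getbinpkg as well.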
15557
15558         # Allow -p to remove --ask
15559         if ("--pretend" in myopts) and ("--ask" in myopts):
15560                 print ">>> --pretend disables --ask... removing --ask from options."
15561                 del myopts["--ask"]
15562
15563         # forbid --ask when not in a terminal
15564         # note: this breaks `emerge --ask | tee logfile`, but that doesn't work anyway.
15565         if ("--ask" in myopts) and (not sys.stdin.isatty()):
15566                 portage.writemsg("!!! \"--ask\" should only be used in a terminal. Exiting.\n",
15567                         noiselevel=-1)
15568                 return 1
15569
15570         if settings.get("PORTAGE_DEBUG", "") == "1":
15571                 spinner.update = spinner.update_quiet
15572                 portage.debug=1
15573                 if "python-trace" in settings.features:
15574                         import portage.debug
15575                         portage.debug.set_trace(True)
15576
15577         if "--quiet" not in myopts:
15578                 if not sys.stdout.isatty() or ("--nospinner" in myopts):
15579                         spinner.update = spinner.update_basic
15580
15581         if myaction == 'version':
15582                 print getportageversion(settings["PORTDIR"], settings["ROOT"],
15583                         settings.profile_path, settings["CHOST"],
15584                         trees[settings["ROOT"]]["vartree"].dbapi)
15585                 return 0
15586         elif "--help" in myopts:
15587                 _emerge.help.help(myaction, myopts, portage.output.havecolor)
15588                 return 0
15589
15590         if "--debug" in myopts:
15591                 print "myaction", myaction
15592                 print "myopts", myopts
15593
15594         if not myaction and not myfiles and "--resume" not in myopts:
15595                 _emerge.help.help(myaction, myopts, portage.output.havecolor)
15596                 return 1
15597
15598         pretend = "--pretend" in myopts
15599         fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
15600         buildpkgonly = "--buildpkgonly" in myopts
15601
15602         # check if root user is the current user for the actions where emerge needs this
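              # portage.secpass reflects the caller's privileges (roughly 2 for
              # root, 1 for members of the portage group, 0 otherwise, per
              # portage.data); a few operations (fetching, --pretend, metadata
              # regeneration and the like) are allowed with reduced privileges,
              # everything else needs root.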
15603         if portage.secpass < 2:
15604                 # We've already allowed "--version" and "--help" above.
15605                 if "--pretend" not in myopts and myaction not in ("search","info"):
15606                         need_superuser = not \
15607                                 (fetchonly or \
15608                                 (buildpkgonly and secpass >= 1) or \
15609                                 myaction in ("metadata", "regen") or \
15610                                 (myaction == "sync" and os.access(settings["PORTDIR"], os.W_OK)))
15611                         if portage.secpass < 1 or \
15612                                 need_superuser:
15613                                 if need_superuser:
15614                                         access_desc = "superuser"
15615                                 else:
15616                                         access_desc = "portage group"
15617                                 # Always show portage_group_warning() when only portage group
15618                                 # access is required but the user is not in the portage group.
15619                                 from portage.data import portage_group_warning
15620                                 if "--ask" in myopts:
15621                                         myopts["--pretend"] = True
15622                                         del myopts["--ask"]
15623                                         print ("%s access is required... " + \
15624                                                 "adding --pretend to options.\n") % access_desc
15625                                         if portage.secpass < 1 and not need_superuser:
15626                                                 portage_group_warning()
15627                                 else:
15628                                         sys.stderr.write(("emerge: %s access is " + \
15629                                                 "required.\n\n") % access_desc)
15630                                         if portage.secpass < 1 and not need_superuser:
15631                                                 portage_group_warning()
15632                                         return 1
15633
15634         disable_emergelog = False
15635         for x in ("--pretend", "--fetchonly", "--fetch-all-uri"):
15636                 if x in myopts:
15637                         disable_emergelog = True
15638                         break
15639         if myaction in ("search", "info"):
15640                 disable_emergelog = True
15641         if disable_emergelog:
15642                 """ Disable emergelog for everything except build or unmerge
15643                 operations.  This helps minimize parallel emerge.log entries that can
15644                 confuse log parsers.  We especially want it disabled during
15645                 parallel-fetch, which uses --resume --fetchonly."""
15646                 global emergelog
15647                 def emergelog(*pargs, **kargs):
15648                         pass
15649
15650         if "--pretend" not in myopts:
15651                 emergelog(xterm_titles, "Started emerge on: "+\
15652                         time.strftime("%b %d, %Y %H:%M:%S", time.localtime()))
15653                 myelogstr=""
15654                 if myopts:
15655                         myelogstr=" ".join(myopts)
15656                 if myaction:
15657                         myelogstr+=" "+myaction
15658                 if myfiles:
15659                         myelogstr += " " + " ".join(oldargs)
15660                 emergelog(xterm_titles, " *** emerge " + myelogstr)
15661         del oldargs
15662
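              # Install SIGINT/SIGTERM handlers that log the signal and exit with
              # 100 plus the signal number, so a signal-driven exit can be told
              # apart from an ordinary failure.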
15663         def emergeexitsig(signum, frame):
15664                 signal.signal(signal.SIGINT, signal.SIG_IGN)
15665                 signal.signal(signal.SIGTERM, signal.SIG_IGN)
15666                 portage.util.writemsg("\n\nExiting on signal %(signal)s\n" % {"signal":signum})
15667                 sys.exit(100+signum)
15668         signal.signal(signal.SIGINT, emergeexitsig)
15669         signal.signal(signal.SIGTERM, emergeexitsig)
15670
15671         def emergeexit():
15672                 """This gets our final log message in before we quit."""
15673                 if "--pretend" not in myopts:
15674                         emergelog(xterm_titles, " *** terminating.")
15675                 if "notitles" not in settings.features:
15676                         xtermTitleReset()
15677         portage.atexit_register(emergeexit)
15678
15679         if myaction in ("config", "metadata", "regen", "sync"):
15680                 if "--pretend" in myopts:
15681                         sys.stderr.write(("emerge: The '%s' action does " + \
15682                                 "not support '--pretend'.\n") % myaction)
15683                         return 1
15684
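              # Dispatch the remaining actions to their handlers.  The sync and
              # regen branches return an exit status directly; the final else
              # branch (the ordinary build/update path) runs post_emerge and
              # returns the result of action_build.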
15685         if "sync" == myaction:
15686                 return action_sync(settings, trees, mtimedb, myopts, myaction)
15687         elif "metadata" == myaction:
15688                 action_metadata(settings, portdb, myopts)
15689         elif myaction=="regen":
15690                 validate_ebuild_environment(trees)
15691                 return action_regen(settings, portdb, myopts.get("--jobs"),
15692                         myopts.get("--load-average"))
15693         # HELP action
15694         elif "config"==myaction:
15695                 validate_ebuild_environment(trees)
15696                 action_config(settings, trees, myopts, myfiles)
15697
15698         # SEARCH action
15699         elif "search"==myaction:
15700                 validate_ebuild_environment(trees)
15701                 action_search(trees[settings["ROOT"]]["root_config"],
15702                         myopts, myfiles, spinner)
15703         elif myaction in ("clean", "unmerge") or \
15704                 (myaction == "prune" and "--nodeps" in myopts):
15705                 validate_ebuild_environment(trees)
15706
15707                 # Ensure atoms are valid before calling unmerge().
15708                 # For backward compat, leading '=' is not required.
15709                 for x in myfiles:
15710                         if is_valid_package_atom(x) or \
15711                                 is_valid_package_atom("=" + x):
15712                                 continue
15713                         msg = []
15714                         msg.append("'%s' is not a valid package atom." % (x,))
15715                         msg.append("Please check ebuild(5) for full details.")
15716                         writemsg_level("".join("!!! %s\n" % line for line in msg),
15717                                 level=logging.ERROR, noiselevel=-1)
15718                         return 1
15719
15720                 # When given a list of atoms, unmerge
15721                 # them in the order given.
15722                 ordered = myaction == "unmerge"
15723                 if 1 == unmerge(root_config, myopts, myaction, myfiles,
15724                         mtimedb["ldpath"], ordered=ordered):
15725                         if not (buildpkgonly or fetchonly or pretend):
15726                                 post_emerge(root_config, myopts, mtimedb, os.EX_OK)
15727
15728         elif myaction in ("depclean", "info", "prune"):
15729
15730                 # Ensure atoms are valid before calling unmerge().
15731                 vardb = trees[settings["ROOT"]]["vartree"].dbapi
15732                 valid_atoms = []
15733                 for x in myfiles:
15734                         if is_valid_package_atom(x):
15735                                 try:
15736                                         valid_atoms.append(
15737                                                 portage.dep_expand(x, mydb=vardb, settings=settings))
15738                                 except portage.exception.AmbiguousPackageName, e:
15739                                         msg = "The short ebuild name \"" + x + \
15740                                                 "\" is ambiguous.  Please specify " + \
15741                                                 "one of the following " + \
15742                                                 "fully-qualified ebuild names instead:"
15743                                         for line in textwrap.wrap(msg, 70):
15744                                                 writemsg_level("!!! %s\n" % (line,),
15745                                                         level=logging.ERROR, noiselevel=-1)
15746                                         for i in e[0]:
15747                                                 writemsg_level("    %s\n" % colorize("INFORM", i),
15748                                                         level=logging.ERROR, noiselevel=-1)
15749                                         writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
15750                                         return 1
15751                                 continue
15752                         msg = []
15753                         msg.append("'%s' is not a valid package atom." % (x,))
15754                         msg.append("Please check ebuild(5) for full details.")
15755                         writemsg_level("".join("!!! %s\n" % line for line in msg),
15756                                 level=logging.ERROR, noiselevel=-1)
15757                         return 1
15758
15759                 if myaction == "info":
15760                         return action_info(settings, trees, myopts, valid_atoms)
15761
15762                 validate_ebuild_environment(trees)
15763                 action_depclean(settings, trees, mtimedb["ldpath"],
15764                         myopts, myaction, valid_atoms, spinner)
15765                 if not (buildpkgonly or fetchonly or pretend):
15766                         post_emerge(root_config, myopts, mtimedb, os.EX_OK)
15767         # "update", "system", or just process files:
15768         else:
15769                 validate_ebuild_environment(trees)
15770
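                      # Arguments on the build path may be set names (e.g. @world),
                      # package atoms (e.g. >=dev-lang/python-2.5), or paths to
                      # existing files such as a local .ebuild or .tbz2; anything
                      # else is rejected as an invalid atom.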
15771                 for x in myfiles:
15772                         if x.startswith(SETPREFIX) or \
15773                                 is_valid_package_atom(x):
15774                                 continue
15775                         if x[:1] == os.sep:
15776                                 continue
15777                         try:
15778                                 os.lstat(x)
15779                                 continue
15780                         except OSError:
15781                                 pass
15782                         msg = []
15783                         msg.append("'%s' is not a valid package atom." % (x,))
15784                         msg.append("Please check ebuild(5) for full details.")
15785                         writemsg_level("".join("!!! %s\n" % line for line in msg),
15786                                 level=logging.ERROR, noiselevel=-1)
15787                         return 1
15788
15789                 if "--pretend" not in myopts:
15790                         display_news_notification(root_config, myopts)
15791                 retval = action_build(settings, trees, mtimedb,
15792                         myopts, myaction, myfiles, spinner)
15793                 root_config = trees[settings["ROOT"]]["root_config"]
15794                 post_emerge(root_config, myopts, mtimedb, retval)
15795
15796                 return retval