Make emerge --info <atom> display USE similarly to how they are displayed
[portage.git] / pym / _emerge / __init__.py
1 #!/usr/bin/python -O
2 # Copyright 1999-2006 Gentoo Foundation
3 # Distributed under the terms of the GNU General Public License v2
4 # $Id$
5
6 import array
7 import codecs
8 from collections import deque
9 import fcntl
10 import formatter
11 import logging
12 import pwd
13 import select
14 import shlex
15 import shutil
16 import signal
17 import sys
18 import textwrap
19 import urlparse
20 import weakref
21 import gc
22 import os, stat
23 import platform
24
25 try:
26         import portage
27 except ImportError:
28         from os import path as osp
29         sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
30         import portage
31
32 from portage import digraph
33 from portage.const import NEWS_LIB_PATH
34
35 import _emerge.help
36 import portage.xpak, commands, errno, re, socket, time
37 from portage.output import blue, bold, colorize, darkblue, darkgreen, darkred, green, \
38         nc_len, red, teal, turquoise, xtermTitle, \
39         xtermTitleReset, yellow
40 from portage.output import create_color_func
41 good = create_color_func("GOOD")
42 bad = create_color_func("BAD")
43 # white looks bad on terminals with white background
44 from portage.output import bold as white
45
46 import portage.elog
47 import portage.dep
48 portage.dep._dep_check_strict = True
49 import portage.util
50 import portage.locks
51 import portage.exception
52 from portage.data import secpass
53 from portage.elog.messages import eerror
54 from portage.util import normalize_path as normpath
55 from portage.util import cmp_sort_key, writemsg, writemsg_level
56 from portage.sets import load_default_config, SETPREFIX
57 from portage.sets.base import InternalPackageSet
58
59 from itertools import chain, izip
60
61 try:
62         import cPickle as pickle
63 except ImportError:
64         import pickle
65
66 try:
67         from cStringIO import StringIO
68 except ImportError:
69         from StringIO import StringIO
70
71 class stdout_spinner(object):
72         scroll_msgs = [
73                 "Gentoo Rocks ("+platform.system()+")",
74                 "Thank you for using Gentoo. :)",
75                 "Are you actually trying to read this?",
76                 "How many times have you stared at this?",
77                 "We are generating the cache right now",
78                 "You are paying too much attention.",
79                 "A theory is better than its explanation.",
80                 "Phasers locked on target, Captain.",
81                 "Thrashing is just virtual crashing.",
82                 "To be is to program.",
83                 "Real Users hate Real Programmers.",
84                 "When all else fails, read the instructions.",
85                 "Functionality breeds Contempt.",
86                 "The future lies ahead.",
87                 "3.1415926535897932384626433832795028841971694",
88                 "Sometimes insanity is the only alternative.",
89                 "Inaccuracy saves a world of explanation.",
90         ]
91
92         twirl_sequence = "/-\\|/-\\|/-\\|/-\\|\\-/|\\-/|\\-/|\\-/|"
93
94         def __init__(self):
95                 self.spinpos = 0
96                 self.update = self.update_twirl
97                 self.scroll_sequence = self.scroll_msgs[
98                         int(time.time() * 100) % len(self.scroll_msgs)]
99                 self.last_update = 0
100                 self.min_display_latency = 0.05
101
102         def _return_early(self):
103                 """
104                 Flushing ouput to the tty too frequently wastes cpu time. Therefore,
105                 each update* method should return without doing any output when this
106                 method returns True.
107                 """
108                 cur_time = time.time()
109                 if cur_time - self.last_update < self.min_display_latency:
110                         return True
111                 self.last_update = cur_time
112                 return False
113
114         def update_basic(self):
115                 self.spinpos = (self.spinpos + 1) % 500
116                 if self._return_early():
117                         return
118                 if (self.spinpos % 100) == 0:
119                         if self.spinpos == 0:
120                                 sys.stdout.write(". ")
121                         else:
122                                 sys.stdout.write(".")
123                 sys.stdout.flush()
124
125         def update_scroll(self):
126                 if self._return_early():
127                         return
128                 if(self.spinpos >= len(self.scroll_sequence)):
129                         sys.stdout.write(darkgreen(" \b\b\b" + self.scroll_sequence[
130                                 len(self.scroll_sequence) - 1 - (self.spinpos % len(self.scroll_sequence))]))
131                 else:
132                         sys.stdout.write(green("\b " + self.scroll_sequence[self.spinpos]))
133                 sys.stdout.flush()
134                 self.spinpos = (self.spinpos + 1) % (2 * len(self.scroll_sequence))
135
136         def update_twirl(self):
137                 self.spinpos = (self.spinpos + 1) % len(self.twirl_sequence)
138                 if self._return_early():
139                         return
140                 sys.stdout.write("\b\b " + self.twirl_sequence[self.spinpos])
141                 sys.stdout.flush()
142
143         def update_quiet(self):
144                 return
145
146 def userquery(prompt, responses=None, colours=None):
147         """Displays a prompt and a set of responses, then waits for a response
148         which is checked against the responses and the first to match is
149         returned.  An empty response will match the first value in responses.  The
150         input buffer is *not* cleared prior to the prompt!
151
152         prompt: a String.
153         responses: a List of Strings.
154         colours: a List of Functions taking and returning a String, used to
155         process the responses for display. Typically these will be functions
156         like red() but could be e.g. lambda x: "DisplayString".
157         If responses is omitted, defaults to ["Yes", "No"], [green, red].
158         If only colours is omitted, defaults to [bold, ...].
159
160         Returns a member of the List responses. (If called without optional
161         arguments, returns "Yes" or "No".)
162         KeyboardInterrupt is converted to SystemExit to avoid tracebacks being
163         printed."""
164         if responses is None:
165                 responses = ["Yes", "No"]
166                 colours = [
167                         create_color_func("PROMPT_CHOICE_DEFAULT"),
168                         create_color_func("PROMPT_CHOICE_OTHER")
169                 ]
170         elif colours is None:
171                 colours=[bold]
172         colours=(colours*len(responses))[:len(responses)]
173         print bold(prompt),
174         try:
175                 while True:
176                         response=raw_input("["+"/".join([colours[i](responses[i]) for i in range(len(responses))])+"] ")
177                         for key in responses:
178                                 # An empty response will match the first value in responses.
179                                 if response.upper()==key[:len(response)].upper():
180                                         return key
181                         print "Sorry, response '%s' not understood." % response,
182         except (EOFError, KeyboardInterrupt):
183                 print "Interrupted."
184                 sys.exit(1)
185
186 actions = frozenset([
187 "clean", "config", "depclean",
188 "info", "list-sets", "metadata",
189 "prune", "regen",  "search",
190 "sync",  "unmerge", "version",
191 ])
192 options=[
193 "--ask",          "--alphabetical",
194 "--buildpkg",     "--buildpkgonly",
195 "--changelog",    "--columns",
196 "--complete-graph",
197 "--debug",        "--deep",
198 "--digest",
199 "--emptytree",
200 "--fetchonly",    "--fetch-all-uri",
201 "--getbinpkg",    "--getbinpkgonly",
202 "--help",         "--ignore-default-opts",
203 "--keep-going",
204 "--noconfmem",
205 "--newuse",       "--nocolor",
206 "--nodeps",       "--noreplace",
207 "--nospinner",    "--oneshot",
208 "--onlydeps",     "--pretend",
209 "--quiet",        "--resume",
210 "--searchdesc",   "--selective",
211 "--skipfirst",
212 "--tree",
213 "--update",
214 "--usepkg",       "--usepkgonly",
215 "--verbose",
216 ]
217
218 shortmapping={
219 "1":"--oneshot",
220 "a":"--ask",
221 "b":"--buildpkg",  "B":"--buildpkgonly",
222 "c":"--clean",     "C":"--unmerge",
223 "d":"--debug",     "D":"--deep",
224 "e":"--emptytree",
225 "f":"--fetchonly", "F":"--fetch-all-uri",
226 "g":"--getbinpkg", "G":"--getbinpkgonly",
227 "h":"--help",
228 "k":"--usepkg",    "K":"--usepkgonly",
229 "l":"--changelog",
230 "n":"--noreplace", "N":"--newuse",
231 "o":"--onlydeps",  "O":"--nodeps",
232 "p":"--pretend",   "P":"--prune",
233 "q":"--quiet",
234 "s":"--search",    "S":"--searchdesc",
235 "t":"--tree",
236 "u":"--update",
237 "v":"--verbose",   "V":"--version"
238 }
239
240 def emergelog(xterm_titles, mystr, short_msg=None):
241         if xterm_titles and short_msg:
242                 if "HOSTNAME" in os.environ:
243                         short_msg = os.environ["HOSTNAME"]+": "+short_msg
244                 xtermTitle(short_msg)
245         try:
246                 file_path = "/var/log/emerge.log"
247                 mylogfile = open(file_path, "a")
248                 portage.util.apply_secpass_permissions(file_path,
249                         uid=portage.portage_uid, gid=portage.portage_gid,
250                         mode=0660)
251                 mylock = None
252                 try:
253                         mylock = portage.locks.lockfile(mylogfile)
254                         # seek because we may have gotten held up by the lock.
255                         # if so, we may not be positioned at the end of the file.
256                         mylogfile.seek(0, 2)
257                         mylogfile.write(str(time.time())[:10]+": "+mystr+"\n")
258                         mylogfile.flush()
259                 finally:
260                         if mylock:
261                                 portage.locks.unlockfile(mylock)
262                         mylogfile.close()
263         except (IOError,OSError,portage.exception.PortageException), e:
264                 if secpass >= 1:
265                         print >> sys.stderr, "emergelog():",e
266
267 def countdown(secs=5, doing="Starting"):
268         if secs:
269                 print ">>> Waiting",secs,"seconds before starting..."
270                 print ">>> (Control-C to abort)...\n"+doing+" in: ",
271                 ticks=range(secs)
272                 ticks.reverse()
273                 for sec in ticks:
274                         sys.stdout.write(colorize("UNMERGE_WARN", str(sec+1)+" "))
275                         sys.stdout.flush()
276                         time.sleep(1)
277                 print
278
279 # formats a size given in bytes nicely
280 def format_size(mysize):
281         if isinstance(mysize, basestring):
282                 return mysize
283         if 0 != mysize % 1024:
284                 # Always round up to the next kB so that it doesn't show 0 kB when
285                 # some small file still needs to be fetched.
286                 mysize += 1024 - mysize % 1024
287         mystr=str(mysize/1024)
288         mycount=len(mystr)
289         while (mycount > 3):
290                 mycount-=3
291                 mystr=mystr[:mycount]+","+mystr[mycount:]
292         return mystr+" kB"
293
294
295 def getgccversion(chost):
296         """
297         rtype: C{str}
298         return:  the current in-use gcc version
299         """
300
301         gcc_ver_command = 'gcc -dumpversion'
302         gcc_ver_prefix = 'gcc-'
303
304         gcc_not_found_error = red(
305         "!!! No gcc found. You probably need to 'source /etc/profile'\n" +
306         "!!! to update the environment of this terminal and possibly\n" +
307         "!!! other terminals also.\n"
308         )
309
310         mystatus, myoutput = commands.getstatusoutput("gcc-config -c")
311         if mystatus == os.EX_OK and myoutput.startswith(chost + "-"):
312                 return myoutput.replace(chost + "-", gcc_ver_prefix, 1)
313
314         mystatus, myoutput = commands.getstatusoutput(
315                 chost + "-" + gcc_ver_command)
316         if mystatus == os.EX_OK:
317                 return gcc_ver_prefix + myoutput
318
319         mystatus, myoutput = commands.getstatusoutput(gcc_ver_command)
320         if mystatus == os.EX_OK:
321                 return gcc_ver_prefix + myoutput
322
323         portage.writemsg(gcc_not_found_error, noiselevel=-1)
324         return "[unavailable]"
325
326 def getportageversion(portdir, target_root, profile, chost, vardb):
327         profilever = "unavailable"
328         if profile:
329                 realpath = os.path.realpath(profile)
330                 basepath   = os.path.realpath(os.path.join(portdir, "profiles"))
331                 if realpath.startswith(basepath):
332                         profilever = realpath[1 + len(basepath):]
333                 else:
334                         try:
335                                 profilever = "!" + os.readlink(profile)
336                         except (OSError):
337                                 pass
338                 del realpath, basepath
339
340         libcver=[]
341         libclist  = vardb.match("virtual/libc")
342         libclist += vardb.match("virtual/glibc")
343         libclist  = portage.util.unique_array(libclist)
344         for x in libclist:
345                 xs=portage.catpkgsplit(x)
346                 if libcver:
347                         libcver+=","+"-".join(xs[1:])
348                 else:
349                         libcver="-".join(xs[1:])
350         if libcver==[]:
351                 libcver="unavailable"
352
353         gccver = getgccversion(chost)
354         unameout=platform.release()+" "+platform.machine()
355
356         return "Portage " + portage.VERSION +" ("+profilever+", "+gccver+", "+libcver+", "+unameout+")"
357
358 def create_depgraph_params(myopts, myaction):
359         #configure emerge engine parameters
360         #
361         # self:      include _this_ package regardless of if it is merged.
362         # selective: exclude the package if it is merged
363         # recurse:   go into the dependencies
364         # deep:      go into the dependencies of already merged packages
365         # empty:     pretend nothing is merged
366         # complete:  completely account for all known dependencies
367         # remove:    build graph for use in removing packages
368         myparams = set(["recurse"])
369
370         if myaction == "remove":
371                 myparams.add("remove")
372                 myparams.add("complete")
373                 return myparams
374
375         if "--update" in myopts or \
376                 "--newuse" in myopts or \
377                 "--reinstall" in myopts or \
378                 "--noreplace" in myopts:
379                 myparams.add("selective")
380         if "--emptytree" in myopts:
381                 myparams.add("empty")
382                 myparams.discard("selective")
383         if "--nodeps" in myopts:
384                 myparams.discard("recurse")
385         if "--deep" in myopts:
386                 myparams.add("deep")
387         if "--complete-graph" in myopts:
388                 myparams.add("complete")
389         return myparams
390
391 # search functionality
392 class search(object):
393
394         #
395         # class constants
396         #
397         VERSION_SHORT=1
398         VERSION_RELEASE=2
399
400         #
401         # public interface
402         #
403         def __init__(self, root_config, spinner, searchdesc,
404                 verbose, usepkg, usepkgonly):
405                 """Searches the available and installed packages for the supplied search key.
406                 The list of available and installed packages is created at object instantiation.
407                 This makes successive searches faster."""
408                 self.settings = root_config.settings
409                 self.vartree = root_config.trees["vartree"]
410                 self.spinner = spinner
411                 self.verbose = verbose
412                 self.searchdesc = searchdesc
413                 self.root_config = root_config
414                 self.setconfig = root_config.setconfig
415                 self.matches = {"pkg" : []}
416                 self.mlen = 0
417
418                 def fake_portdb():
419                         pass
420                 self.portdb = fake_portdb
421                 for attrib in ("aux_get", "cp_all",
422                         "xmatch", "findname", "getFetchMap"):
423                         setattr(fake_portdb, attrib, getattr(self, "_"+attrib))
424
425                 self._dbs = []
426
427                 portdb = root_config.trees["porttree"].dbapi
428                 bindb = root_config.trees["bintree"].dbapi
429                 vardb = root_config.trees["vartree"].dbapi
430
431                 if not usepkgonly and portdb._have_root_eclass_dir:
432                         self._dbs.append(portdb)
433
434                 if (usepkg or usepkgonly) and bindb.cp_all():
435                         self._dbs.append(bindb)
436
437                 self._dbs.append(vardb)
438                 self._portdb = portdb
439
440         def _cp_all(self):
441                 cp_all = set()
442                 for db in self._dbs:
443                         cp_all.update(db.cp_all())
444                 return list(sorted(cp_all))
445
446         def _aux_get(self, *args, **kwargs):
447                 for db in self._dbs:
448                         try:
449                                 return db.aux_get(*args, **kwargs)
450                         except KeyError:
451                                 pass
452                 raise
453
454         def _findname(self, *args, **kwargs):
455                 for db in self._dbs:
456                         if db is not self._portdb:
457                                 # We don't want findname to return anything
458                                 # unless it's an ebuild in a portage tree.
459                                 # Otherwise, it's already built and we don't
460                                 # care about it.
461                                 continue
462                         func = getattr(db, "findname", None)
463                         if func:
464                                 value = func(*args, **kwargs)
465                                 if value:
466                                         return value
467                 return None
468
469         def _getFetchMap(self, *args, **kwargs):
470                 for db in self._dbs:
471                         func = getattr(db, "getFetchMap", None)
472                         if func:
473                                 value = func(*args, **kwargs)
474                                 if value:
475                                         return value
476                 return {}
477
478         def _visible(self, db, cpv, metadata):
479                 installed = db is self.vartree.dbapi
480                 built = installed or db is not self._portdb
481                 pkg_type = "ebuild"
482                 if installed:
483                         pkg_type = "installed"
484                 elif built:
485                         pkg_type = "binary"
486                 return visible(self.settings,
487                         Package(type_name=pkg_type, root_config=self.root_config,
488                         cpv=cpv, built=built, installed=installed, metadata=metadata))
489
490         def _xmatch(self, level, atom):
491                 """
492                 This method does not expand old-style virtuals because it
493                 is restricted to returning matches for a single ${CATEGORY}/${PN}
494                 and old-style virual matches unreliable for that when querying
495                 multiple package databases. If necessary, old-style virtuals
496                 can be performed on atoms prior to calling this method.
497                 """
498                 cp = portage.dep_getkey(atom)
499                 if level == "match-all":
500                         matches = set()
501                         for db in self._dbs:
502                                 if hasattr(db, "xmatch"):
503                                         matches.update(db.xmatch(level, atom))
504                                 else:
505                                         matches.update(db.match(atom))
506                         result = list(x for x in matches if portage.cpv_getkey(x) == cp)
507                         db._cpv_sort_ascending(result)
508                 elif level == "match-visible":
509                         matches = set()
510                         for db in self._dbs:
511                                 if hasattr(db, "xmatch"):
512                                         matches.update(db.xmatch(level, atom))
513                                 else:
514                                         db_keys = list(db._aux_cache_keys)
515                                         for cpv in db.match(atom):
516                                                 metadata = izip(db_keys,
517                                                         db.aux_get(cpv, db_keys))
518                                                 if not self._visible(db, cpv, metadata):
519                                                         continue
520                                                 matches.add(cpv)
521                         result = list(x for x in matches if portage.cpv_getkey(x) == cp)
522                         db._cpv_sort_ascending(result)
523                 elif level == "bestmatch-visible":
524                         result = None
525                         for db in self._dbs:
526                                 if hasattr(db, "xmatch"):
527                                         cpv = db.xmatch("bestmatch-visible", atom)
528                                         if not cpv or portage.cpv_getkey(cpv) != cp:
529                                                 continue
530                                         if not result or cpv == portage.best([cpv, result]):
531                                                 result = cpv
532                                 else:
533                                         db_keys = Package.metadata_keys
534                                         # break out of this loop with highest visible
535                                         # match, checked in descending order
536                                         for cpv in reversed(db.match(atom)):
537                                                 if portage.cpv_getkey(cpv) != cp:
538                                                         continue
539                                                 metadata = izip(db_keys,
540                                                         db.aux_get(cpv, db_keys))
541                                                 if not self._visible(db, cpv, metadata):
542                                                         continue
543                                                 if not result or cpv == portage.best([cpv, result]):
544                                                         result = cpv
545                                                 break
546                 else:
547                         raise NotImplementedError(level)
548                 return result
549
550         def execute(self,searchkey):
551                 """Performs the search for the supplied search key"""
552                 match_category = 0
553                 self.searchkey=searchkey
554                 self.packagematches = []
555                 if self.searchdesc:
556                         self.searchdesc=1
557                         self.matches = {"pkg":[], "desc":[], "set":[]}
558                 else:
559                         self.searchdesc=0
560                         self.matches = {"pkg":[], "set":[]}
561                 print "Searching...   ",
562
563                 regexsearch = False
564                 if self.searchkey.startswith('%'):
565                         regexsearch = True
566                         self.searchkey = self.searchkey[1:]
567                 if self.searchkey.startswith('@'):
568                         match_category = 1
569                         self.searchkey = self.searchkey[1:]
570                 if regexsearch:
571                         self.searchre=re.compile(self.searchkey,re.I)
572                 else:
573                         self.searchre=re.compile(re.escape(self.searchkey), re.I)
574                 for package in self.portdb.cp_all():
575                         self.spinner.update()
576
577                         if match_category:
578                                 match_string  = package[:]
579                         else:
580                                 match_string  = package.split("/")[-1]
581
582                         masked=0
583                         if self.searchre.search(match_string):
584                                 if not self.portdb.xmatch("match-visible", package):
585                                         masked=1
586                                 self.matches["pkg"].append([package,masked])
587                         elif self.searchdesc: # DESCRIPTION searching
588                                 full_package = self.portdb.xmatch("bestmatch-visible", package)
589                                 if not full_package:
590                                         #no match found; we don't want to query description
591                                         full_package = portage.best(
592                                                 self.portdb.xmatch("match-all", package))
593                                         if not full_package:
594                                                 continue
595                                         else:
596                                                 masked=1
597                                 try:
598                                         full_desc = self.portdb.aux_get(
599                                                 full_package, ["DESCRIPTION"])[0]
600                                 except KeyError:
601                                         print "emerge: search: aux_get() failed, skipping"
602                                         continue
603                                 if self.searchre.search(full_desc):
604                                         self.matches["desc"].append([full_package,masked])
605
606                 self.sdict = self.setconfig.getSets()
607                 for setname in self.sdict:
608                         self.spinner.update()
609                         if match_category:
610                                 match_string = setname
611                         else:
612                                 match_string = setname.split("/")[-1]
613                         
614                         if self.searchre.search(match_string):
615                                 self.matches["set"].append([setname, False])
616                         elif self.searchdesc:
617                                 if self.searchre.search(
618                                         self.sdict[setname].getMetadata("DESCRIPTION")):
619                                         self.matches["set"].append([setname, False])
620                         
621                 self.mlen=0
622                 for mtype in self.matches:
623                         self.matches[mtype].sort()
624                         self.mlen += len(self.matches[mtype])
625
626         def addCP(self, cp):
627                 if not self.portdb.xmatch("match-all", cp):
628                         return
629                 masked = 0
630                 if not self.portdb.xmatch("bestmatch-visible", cp):
631                         masked = 1
632                 self.matches["pkg"].append([cp, masked])
633                 self.mlen += 1
634
635         def output(self):
636                 """Outputs the results of the search."""
637                 print "\b\b  \n[ Results for search key : "+white(self.searchkey)+" ]"
638                 print "[ Applications found : "+white(str(self.mlen))+" ]"
639                 print " "
640                 vardb = self.vartree.dbapi
641                 for mtype in self.matches:
642                         for match,masked in self.matches[mtype]:
643                                 full_package = None
644                                 if mtype == "pkg":
645                                         catpack = match
646                                         full_package = self.portdb.xmatch(
647                                                 "bestmatch-visible", match)
648                                         if not full_package:
649                                                 #no match found; we don't want to query description
650                                                 masked=1
651                                                 full_package = portage.best(
652                                                         self.portdb.xmatch("match-all",match))
653                                 elif mtype == "desc":
654                                         full_package = match
655                                         match        = portage.cpv_getkey(match)
656                                 elif mtype == "set":
657                                         print green("*")+"  "+white(match)
658                                         print "     ", darkgreen("Description:")+"  ", self.sdict[match].getMetadata("DESCRIPTION")
659                                         print
660                                 if full_package:
661                                         try:
662                                                 desc, homepage, license = self.portdb.aux_get(
663                                                         full_package, ["DESCRIPTION","HOMEPAGE","LICENSE"])
664                                         except KeyError:
665                                                 print "emerge: search: aux_get() failed, skipping"
666                                                 continue
667                                         if masked:
668                                                 print green("*")+"  "+white(match)+" "+red("[ Masked ]")
669                                         else:
670                                                 print green("*")+"  "+white(match)
671                                         myversion = self.getVersion(full_package, search.VERSION_RELEASE)
672
673                                         mysum = [0,0]
674                                         file_size_str = None
675                                         mycat = match.split("/")[0]
676                                         mypkg = match.split("/")[1]
677                                         mycpv = match + "-" + myversion
678                                         myebuild = self.portdb.findname(mycpv)
679                                         if myebuild:
680                                                 pkgdir = os.path.dirname(myebuild)
681                                                 from portage import manifest
682                                                 mf = manifest.Manifest(
683                                                         pkgdir, self.settings["DISTDIR"])
684                                                 try:
685                                                         uri_map = self.portdb.getFetchMap(mycpv)
686                                                 except portage.exception.InvalidDependString, e:
687                                                         file_size_str = "Unknown (%s)" % (e,)
688                                                         del e
689                                                 else:
690                                                         try:
691                                                                 mysum[0] = mf.getDistfilesSize(uri_map)
692                                                         except KeyError, e:
693                                                                 file_size_str = "Unknown (missing " + \
694                                                                         "digest for %s)" % (e,)
695                                                                 del e
696
697                                         available = False
698                                         for db in self._dbs:
699                                                 if db is not vardb and \
700                                                         db.cpv_exists(mycpv):
701                                                         available = True
702                                                         if not myebuild and hasattr(db, "bintree"):
703                                                                 myebuild = db.bintree.getname(mycpv)
704                                                                 try:
705                                                                         mysum[0] = os.stat(myebuild).st_size
706                                                                 except OSError:
707                                                                         myebuild = None
708                                                         break
709
710                                         if myebuild and file_size_str is None:
711                                                 mystr = str(mysum[0] / 1024)
712                                                 mycount = len(mystr)
713                                                 while (mycount > 3):
714                                                         mycount -= 3
715                                                         mystr = mystr[:mycount] + "," + mystr[mycount:]
716                                                 file_size_str = mystr + " kB"
717
718                                         if self.verbose:
719                                                 if available:
720                                                         print "     ", darkgreen("Latest version available:"),myversion
721                                                 print "     ", self.getInstallationStatus(mycat+'/'+mypkg)
722                                                 if myebuild:
723                                                         print "      %s %s" % \
724                                                                 (darkgreen("Size of files:"), file_size_str)
725                                                 print "     ", darkgreen("Homepage:")+"     ",homepage
726                                                 print "     ", darkgreen("Description:")+"  ",desc
727                                                 print "     ", darkgreen("License:")+"      ",license
728                                                 print
729         #
730         # private interface
731         #
732         def getInstallationStatus(self,package):
733                 installed_package = self.vartree.dep_bestmatch(package)
734                 result = ""
735                 version = self.getVersion(installed_package,search.VERSION_RELEASE)
736                 if len(version) > 0:
737                         result = darkgreen("Latest version installed:")+" "+version
738                 else:
739                         result = darkgreen("Latest version installed:")+" [ Not Installed ]"
740                 return result
741
742         def getVersion(self,full_package,detail):
743                 if len(full_package) > 1:
744                         package_parts = portage.catpkgsplit(full_package)
745                         if detail == search.VERSION_RELEASE and package_parts[3] != 'r0':
746                                 result = package_parts[2]+ "-" + package_parts[3]
747                         else:
748                                 result = package_parts[2]
749                 else:
750                         result = ""
751                 return result
752
753 class RootConfig(object):
754         """This is used internally by depgraph to track information about a
755         particular $ROOT."""
756
757         pkg_tree_map = {
758                 "ebuild"    : "porttree",
759                 "binary"    : "bintree",
760                 "installed" : "vartree"
761         }
762
763         tree_pkg_map = {}
764         for k, v in pkg_tree_map.iteritems():
765                 tree_pkg_map[v] = k
766
767         def __init__(self, settings, trees, setconfig):
768                 self.trees = trees
769                 self.settings = settings
770                 self.iuse_implicit = tuple(sorted(settings._get_implicit_iuse()))
771                 self.root = self.settings["ROOT"]
772                 self.setconfig = setconfig
773                 if setconfig is None:
774                         self.sets = {}
775                 else:
776                         self.sets = self.setconfig.getSets()
777                 self.visible_pkgs = PackageVirtualDbapi(self.settings)
778
779 def create_world_atom(pkg, args_set, root_config):
780         """Create a new atom for the world file if one does not exist.  If the
781         argument atom is precise enough to identify a specific slot then a slot
782         atom will be returned. Atoms that are in the system set may also be stored
783         in world since system atoms can only match one slot while world atoms can
784         be greedy with respect to slots.  Unslotted system packages will not be
785         stored in world."""
786
787         arg_atom = args_set.findAtomForPackage(pkg)
788         if not arg_atom:
789                 return None
790         cp = portage.dep_getkey(arg_atom)
791         new_world_atom = cp
792         sets = root_config.sets
793         portdb = root_config.trees["porttree"].dbapi
794         vardb = root_config.trees["vartree"].dbapi
795         available_slots = set(portdb.aux_get(cpv, ["SLOT"])[0] \
796                 for cpv in portdb.match(cp))
797         slotted = len(available_slots) > 1 or \
798                 (len(available_slots) == 1 and "0" not in available_slots)
799         if not slotted:
800                 # check the vdb in case this is multislot
801                 available_slots = set(vardb.aux_get(cpv, ["SLOT"])[0] \
802                         for cpv in vardb.match(cp))
803                 slotted = len(available_slots) > 1 or \
804                         (len(available_slots) == 1 and "0" not in available_slots)
805         if slotted and arg_atom != cp:
806                 # If the user gave a specific atom, store it as a
807                 # slot atom in the world file.
808                 slot_atom = pkg.slot_atom
809
810                 # For USE=multislot, there are a couple of cases to
811                 # handle here:
812                 #
813                 # 1) SLOT="0", but the real SLOT spontaneously changed to some
814                 #    unknown value, so just record an unslotted atom.
815                 #
816                 # 2) SLOT comes from an installed package and there is no
817                 #    matching SLOT in the portage tree.
818                 #
819                 # Make sure that the slot atom is available in either the
820                 # portdb or the vardb, since otherwise the user certainly
821                 # doesn't want the SLOT atom recorded in the world file
822                 # (case 1 above).  If it's only available in the vardb,
823                 # the user may be trying to prevent a USE=multislot
824                 # package from being removed by --depclean (case 2 above).
825
826                 mydb = portdb
827                 if not portdb.match(slot_atom):
828                         # SLOT seems to come from an installed multislot package
829                         mydb = vardb
830                 # If there is no installed package matching the SLOT atom,
831                 # it probably changed SLOT spontaneously due to USE=multislot,
832                 # so just record an unslotted atom.
833                 if vardb.match(slot_atom):
834                         # Now verify that the argument is precise
835                         # enough to identify a specific slot.
836                         matches = mydb.match(arg_atom)
837                         matched_slots = set()
838                         for cpv in matches:
839                                 matched_slots.add(mydb.aux_get(cpv, ["SLOT"])[0])
840                         if len(matched_slots) == 1:
841                                 new_world_atom = slot_atom
842
843         if new_world_atom == sets["world"].findAtomForPackage(pkg):
844                 # Both atoms would be identical, so there's nothing to add.
845                 return None
846         if not slotted:
847                 # Unlike world atoms, system atoms are not greedy for slots, so they
848                 # can't be safely excluded from world if they are slotted.
849                 system_atom = sets["system"].findAtomForPackage(pkg)
850                 if system_atom:
851                         if not portage.dep_getkey(system_atom).startswith("virtual/"):
852                                 return None
853                         # System virtuals aren't safe to exclude from world since they can
854                         # match multiple old-style virtuals but only one of them will be
855                         # pulled in by update or depclean.
856                         providers = portdb.mysettings.getvirtuals().get(
857                                 portage.dep_getkey(system_atom))
858                         if providers and len(providers) == 1 and providers[0] == cp:
859                                 return None
860         return new_world_atom
861
862 def filter_iuse_defaults(iuse):
863         for flag in iuse:
864                 if flag.startswith("+") or flag.startswith("-"):
865                         yield flag[1:]
866                 else:
867                         yield flag
868
869 class SlotObject(object):
870         __slots__ = ("__weakref__",)
871
872         def __init__(self, **kwargs):
873                 classes = [self.__class__]
874                 while classes:
875                         c = classes.pop()
876                         if c is SlotObject:
877                                 continue
878                         classes.extend(c.__bases__)
879                         slots = getattr(c, "__slots__", None)
880                         if not slots:
881                                 continue
882                         for myattr in slots:
883                                 myvalue = kwargs.get(myattr, None)
884                                 setattr(self, myattr, myvalue)
885
886         def copy(self):
887                 """
888                 Create a new instance and copy all attributes
889                 defined from __slots__ (including those from
890                 inherited classes).
891                 """
892                 obj = self.__class__()
893
894                 classes = [self.__class__]
895                 while classes:
896                         c = classes.pop()
897                         if c is SlotObject:
898                                 continue
899                         classes.extend(c.__bases__)
900                         slots = getattr(c, "__slots__", None)
901                         if not slots:
902                                 continue
903                         for myattr in slots:
904                                 setattr(obj, myattr, getattr(self, myattr))
905
906                 return obj
907
908 class AbstractDepPriority(SlotObject):
909         __slots__ = ("buildtime", "runtime", "runtime_post")
910
911         def __lt__(self, other):
912                 return self.__int__() < other
913
914         def __le__(self, other):
915                 return self.__int__() <= other
916
917         def __eq__(self, other):
918                 return self.__int__() == other
919
920         def __ne__(self, other):
921                 return self.__int__() != other
922
923         def __gt__(self, other):
924                 return self.__int__() > other
925
926         def __ge__(self, other):
927                 return self.__int__() >= other
928
929         def copy(self):
930                 import copy
931                 return copy.copy(self)
932
933 class DepPriority(AbstractDepPriority):
934
935         __slots__ = ("satisfied", "optional", "rebuild")
936
937         def __int__(self):
938                 return 0
939
940         def __str__(self):
941                 if self.optional:
942                         return "optional"
943                 if self.buildtime:
944                         return "buildtime"
945                 if self.runtime:
946                         return "runtime"
947                 if self.runtime_post:
948                         return "runtime_post"
949                 return "soft"
950
951 class BlockerDepPriority(DepPriority):
952         __slots__ = ()
953         def __int__(self):
954                 return 0
955
956         def __str__(self):
957                 return 'blocker'
958
959 BlockerDepPriority.instance = BlockerDepPriority()
960
961 class UnmergeDepPriority(AbstractDepPriority):
962         __slots__ = ("optional", "satisfied",)
963         """
964         Combination of properties           Priority  Category
965
966         runtime                                0       HARD
967         runtime_post                          -1       HARD
968         buildtime                             -2       SOFT
969         (none of the above)                   -2       SOFT
970         """
971
972         MAX    =  0
973         SOFT   = -2
974         MIN    = -2
975
976         def __int__(self):
977                 if self.runtime:
978                         return 0
979                 if self.runtime_post:
980                         return -1
981                 if self.buildtime:
982                         return -2
983                 return -2
984
985         def __str__(self):
986                 myvalue = self.__int__()
987                 if myvalue > self.SOFT:
988                         return "hard"
989                 return "soft"
990
991 class DepPriorityNormalRange(object):
992         """
993         DepPriority properties              Index      Category
994
995         buildtime                                      HARD
996         runtime                                3       MEDIUM
997         runtime_post                           2       MEDIUM_SOFT
998         optional                               1       SOFT
999         (none of the above)                    0       NONE
1000         """
1001         MEDIUM      = 3
1002         MEDIUM_SOFT = 2
1003         SOFT        = 1
1004         NONE        = 0
1005
1006         @classmethod
1007         def _ignore_optional(cls, priority):
1008                 if priority.__class__ is not DepPriority:
1009                         return False
1010                 return bool(priority.optional)
1011
1012         @classmethod
1013         def _ignore_runtime_post(cls, priority):
1014                 if priority.__class__ is not DepPriority:
1015                         return False
1016                 return bool(priority.optional or priority.runtime_post)
1017
1018         @classmethod
1019         def _ignore_runtime(cls, priority):
1020                 if priority.__class__ is not DepPriority:
1021                         return False
1022                 return not priority.buildtime
1023
1024         ignore_medium      = _ignore_runtime
1025         ignore_medium_soft = _ignore_runtime_post
1026         ignore_soft        = _ignore_optional
1027
1028 DepPriorityNormalRange.ignore_priority = (
1029         None,
1030         DepPriorityNormalRange._ignore_optional,
1031         DepPriorityNormalRange._ignore_runtime_post,
1032         DepPriorityNormalRange._ignore_runtime
1033 )
1034
1035 class DepPrioritySatisfiedRange(object):
1036         """
1037         DepPriority                         Index      Category
1038
1039         not satisfied and buildtime                    HARD
1040         not satisfied and runtime              7       MEDIUM
1041         not satisfied and runtime_post         6       MEDIUM_SOFT
1042         satisfied and buildtime and rebuild    5       SOFT
1043         satisfied and buildtime                4       SOFT
1044         satisfied and runtime                  3       SOFT
1045         satisfied and runtime_post             2       SOFT
1046         optional                               1       SOFT
1047         (none of the above)                    0       NONE
1048         """
1049         MEDIUM      = 7
1050         MEDIUM_SOFT = 6
1051         SOFT        = 5
1052         NONE        = 0
1053
1054         @classmethod
1055         def _ignore_optional(cls, priority):
1056                 if priority.__class__ is not DepPriority:
1057                         return False
1058                 return bool(priority.optional)
1059
1060         @classmethod
1061         def _ignore_satisfied_runtime_post(cls, priority):
1062                 if priority.__class__ is not DepPriority:
1063                         return False
1064                 if priority.optional:
1065                         return True
1066                 if not priority.satisfied:
1067                         return False
1068                 return bool(priority.runtime_post)
1069
1070         @classmethod
1071         def _ignore_satisfied_runtime(cls, priority):
1072                 if priority.__class__ is not DepPriority:
1073                         return False
1074                 if priority.optional:
1075                         return True
1076                 if not priority.satisfied:
1077                         return False
1078                 return not priority.buildtime
1079
1080         @classmethod
1081         def _ignore_satisfied_buildtime(cls, priority):
1082                 if priority.__class__ is not DepPriority:
1083                         return False
1084                 if priority.optional:
1085                         return True
1086                 if not priority.satisfied:
1087                         return False
1088                 if priority.buildtime:
1089                         return not priority.rebuild
1090                 return True
1091
1092         @classmethod
1093         def _ignore_satisfied_buildtime_rebuild(cls, priority):
1094                 if priority.__class__ is not DepPriority:
1095                         return False
1096                 if priority.optional:
1097                         return True
1098                 return bool(priority.satisfied)
1099
1100         @classmethod
1101         def _ignore_runtime_post(cls, priority):
1102                 if priority.__class__ is not DepPriority:
1103                         return False
1104                 return bool(priority.optional or \
1105                         priority.satisfied or \
1106                         priority.runtime_post)
1107
1108         @classmethod
1109         def _ignore_runtime(cls, priority):
1110                 if priority.__class__ is not DepPriority:
1111                         return False
1112                 return bool(priority.satisfied or \
1113                         not priority.buildtime)
1114
1115         ignore_medium      = _ignore_runtime
1116         ignore_medium_soft = _ignore_runtime_post
1117         ignore_soft        = _ignore_satisfied_buildtime_rebuild
1118
1119 DepPrioritySatisfiedRange.ignore_priority = (
1120         None,
1121         DepPrioritySatisfiedRange._ignore_optional,
1122         DepPrioritySatisfiedRange._ignore_satisfied_runtime_post,
1123         DepPrioritySatisfiedRange._ignore_satisfied_runtime,
1124         DepPrioritySatisfiedRange._ignore_satisfied_buildtime,
1125         DepPrioritySatisfiedRange._ignore_satisfied_buildtime_rebuild,
1126         DepPrioritySatisfiedRange._ignore_runtime_post,
1127         DepPrioritySatisfiedRange._ignore_runtime
1128 )
1129
1130 def _find_deep_system_runtime_deps(graph):
1131         deep_system_deps = set()
1132         node_stack = []
1133         for node in graph:
1134                 if not isinstance(node, Package) or \
1135                         node.operation == 'uninstall':
1136                         continue
1137                 if node.root_config.sets['system'].findAtomForPackage(node):
1138                         node_stack.append(node)
1139
1140         def ignore_priority(priority):
1141                 """
1142                 Ignore non-runtime priorities.
1143                 """
1144                 if isinstance(priority, DepPriority) and \
1145                         (priority.runtime or priority.runtime_post):
1146                         return False
1147                 return True
1148
1149         while node_stack:
1150                 node = node_stack.pop()
1151                 if node in deep_system_deps:
1152                         continue
1153                 deep_system_deps.add(node)
1154                 for child in graph.child_nodes(node, ignore_priority=ignore_priority):
1155                         if not isinstance(child, Package) or \
1156                                 child.operation == 'uninstall':
1157                                 continue
1158                         node_stack.append(child)
1159
1160         return deep_system_deps
1161
1162 class FakeVartree(portage.vartree):
1163         """This is implements an in-memory copy of a vartree instance that provides
1164         all the interfaces required for use by the depgraph.  The vardb is locked
1165         during the constructor call just long enough to read a copy of the
1166         installed package information.  This allows the depgraph to do it's
1167         dependency calculations without holding a lock on the vardb.  It also
1168         allows things like vardb global updates to be done in memory so that the
1169         user doesn't necessarily need write access to the vardb in cases where
1170         global updates are necessary (updates are performed when necessary if there
1171         is not a matching ebuild in the tree)."""
1172         def __init__(self, root_config, pkg_cache=None, acquire_lock=1):
1173                 self._root_config = root_config
1174                 if pkg_cache is None:
1175                         pkg_cache = {}
1176                 real_vartree = root_config.trees["vartree"]
1177                 portdb = root_config.trees["porttree"].dbapi
1178                 self.root = real_vartree.root
1179                 self.settings = real_vartree.settings
1180                 mykeys = list(real_vartree.dbapi._aux_cache_keys)
1181                 if "_mtime_" not in mykeys:
1182                         mykeys.append("_mtime_")
1183                 self._db_keys = mykeys
1184                 self._pkg_cache = pkg_cache
1185                 self.dbapi = PackageVirtualDbapi(real_vartree.settings)
1186                 vdb_path = os.path.join(self.root, portage.VDB_PATH)
1187                 try:
1188                         # At least the parent needs to exist for the lock file.
1189                         portage.util.ensure_dirs(vdb_path)
1190                 except portage.exception.PortageException:
1191                         pass
1192                 vdb_lock = None
1193                 try:
1194                         if acquire_lock and os.access(vdb_path, os.W_OK):
1195                                 vdb_lock = portage.locks.lockdir(vdb_path)
1196                         real_dbapi = real_vartree.dbapi
1197                         slot_counters = {}
1198                         for cpv in real_dbapi.cpv_all():
1199                                 cache_key = ("installed", self.root, cpv, "nomerge")
1200                                 pkg = self._pkg_cache.get(cache_key)
1201                                 if pkg is not None:
1202                                         metadata = pkg.metadata
1203                                 else:
1204                                         metadata = dict(izip(mykeys, real_dbapi.aux_get(cpv, mykeys)))
1205                                 myslot = metadata["SLOT"]
1206                                 mycp = portage.dep_getkey(cpv)
1207                                 myslot_atom = "%s:%s" % (mycp, myslot)
1208                                 try:
1209                                         mycounter = long(metadata["COUNTER"])
1210                                 except ValueError:
1211                                         mycounter = 0
1212                                         metadata["COUNTER"] = str(mycounter)
1213                                 other_counter = slot_counters.get(myslot_atom, None)
1214                                 if other_counter is not None:
1215                                         if other_counter > mycounter:
1216                                                 continue
1217                                 slot_counters[myslot_atom] = mycounter
1218                                 if pkg is None:
1219                                         pkg = Package(built=True, cpv=cpv,
1220                                                 installed=True, metadata=metadata,
1221                                                 root_config=root_config, type_name="installed")
1222                                 self._pkg_cache[pkg] = pkg
1223                                 self.dbapi.cpv_inject(pkg)
1224                         real_dbapi.flush_cache()
1225                 finally:
1226                         if vdb_lock:
1227                                 portage.locks.unlockdir(vdb_lock)
1228                 # Populate the old-style virtuals using the cached values.
1229                 if not self.settings.treeVirtuals:
1230                         self.settings.treeVirtuals = portage.util.map_dictlist_vals(
1231                                 portage.getCPFromCPV, self.get_all_provides())
1232
1233                 # Intialize variables needed for lazy cache pulls of the live ebuild
1234                 # metadata.  This ensures that the vardb lock is released ASAP, without
1235                 # being delayed in case cache generation is triggered.
1236                 self._aux_get = self.dbapi.aux_get
1237                 self.dbapi.aux_get = self._aux_get_wrapper
1238                 self._match = self.dbapi.match
1239                 self.dbapi.match = self._match_wrapper
1240                 self._aux_get_history = set()
1241                 self._portdb_keys = ["EAPI", "DEPEND", "RDEPEND", "PDEPEND"]
1242                 self._portdb = portdb
1243                 self._global_updates = None
1244
1245         def _match_wrapper(self, cpv, use_cache=1):
1246                 """
1247                 Make sure the metadata in Package instances gets updated for any
1248                 cpv that is returned from a match() call, since the metadata can
1249                 be accessed directly from the Package instance instead of via
1250                 aux_get().
1251                 """
1252                 matches = self._match(cpv, use_cache=use_cache)
1253                 for cpv in matches:
1254                         if cpv in self._aux_get_history:
1255                                 continue
1256                         self._aux_get_wrapper(cpv, [])
1257                 return matches
1258
1259         def _aux_get_wrapper(self, pkg, wants):
1260                 if pkg in self._aux_get_history:
1261                         return self._aux_get(pkg, wants)
1262                 self._aux_get_history.add(pkg)
1263                 try:
1264                         # Use the live ebuild metadata if possible.
1265                         live_metadata = dict(izip(self._portdb_keys,
1266                                 self._portdb.aux_get(pkg, self._portdb_keys)))
1267                         if not portage.eapi_is_supported(live_metadata["EAPI"]):
1268                                 raise KeyError(pkg)
1269                         self.dbapi.aux_update(pkg, live_metadata)
1270                 except (KeyError, portage.exception.PortageException):
1271                         if self._global_updates is None:
1272                                 self._global_updates = \
1273                                         grab_global_updates(self._portdb.porttree_root)
1274                         perform_global_updates(
1275                                 pkg, self.dbapi, self._global_updates)
1276                 return self._aux_get(pkg, wants)
1277
1278         def sync(self, acquire_lock=1):
1279                 """
1280                 Call this method to synchronize state with the real vardb
1281                 after one or more packages may have been installed or
1282                 uninstalled.
1283                 """
1284                 vdb_path = os.path.join(self.root, portage.VDB_PATH)
1285                 try:
1286                         # At least the parent needs to exist for the lock file.
1287                         portage.util.ensure_dirs(vdb_path)
1288                 except portage.exception.PortageException:
1289                         pass
1290                 vdb_lock = None
1291                 try:
1292                         if acquire_lock and os.access(vdb_path, os.W_OK):
1293                                 vdb_lock = portage.locks.lockdir(vdb_path)
1294                         self._sync()
1295                 finally:
1296                         if vdb_lock:
1297                                 portage.locks.unlockdir(vdb_lock)
1298
1299         def _sync(self):
1300
1301                 real_vardb = self._root_config.trees["vartree"].dbapi
1302                 current_cpv_set = frozenset(real_vardb.cpv_all())
1303                 pkg_vardb = self.dbapi
1304                 aux_get_history = self._aux_get_history
1305
1306                 # Remove any packages that have been uninstalled.
1307                 for pkg in list(pkg_vardb):
1308                         if pkg.cpv not in current_cpv_set:
1309                                 pkg_vardb.cpv_remove(pkg)
1310                                 aux_get_history.discard(pkg.cpv)
1311
1312                 # Validate counters and timestamps.
1313                 slot_counters = {}
1314                 root = self.root
1315                 validation_keys = ["COUNTER", "_mtime_"]
1316                 for cpv in current_cpv_set:
1317
1318                         pkg_hash_key = ("installed", root, cpv, "nomerge")
1319                         pkg = pkg_vardb.get(pkg_hash_key)
1320                         if pkg is not None:
1321                                 counter, mtime = real_vardb.aux_get(cpv, validation_keys)
1322                                 try:
1323                                         counter = long(counter)
1324                                 except ValueError:
1325                                         counter = 0
1326
1327                                 if counter != pkg.counter or \
1328                                         mtime != pkg.mtime:
1329                                         pkg_vardb.cpv_remove(pkg)
1330                                         aux_get_history.discard(pkg.cpv)
1331                                         pkg = None
1332
1333                         if pkg is None:
1334                                 pkg = self._pkg(cpv)
1335
1336                         other_counter = slot_counters.get(pkg.slot_atom)
1337                         if other_counter is not None:
1338                                 if other_counter > pkg.counter:
1339                                         continue
1340
1341                         slot_counters[pkg.slot_atom] = pkg.counter
1342                         pkg_vardb.cpv_inject(pkg)
1343
1344                 real_vardb.flush_cache()
1345
1346         def _pkg(self, cpv):
1347                 root_config = self._root_config
1348                 real_vardb = root_config.trees["vartree"].dbapi
1349                 pkg = Package(cpv=cpv, installed=True,
1350                         metadata=izip(self._db_keys,
1351                         real_vardb.aux_get(cpv, self._db_keys)),
1352                         root_config=root_config,
1353                         type_name="installed")
1354
1355                 try:
1356                         mycounter = long(pkg.metadata["COUNTER"])
1357                 except ValueError:
1358                         mycounter = 0
1359                         pkg.metadata["COUNTER"] = str(mycounter)
1360
1361                 return pkg
1362
1363 def grab_global_updates(portdir):
1364         from portage.update import grab_updates, parse_updates
1365         updpath = os.path.join(portdir, "profiles", "updates")
1366         try:
1367                 rawupdates = grab_updates(updpath)
1368         except portage.exception.DirectoryNotFound:
1369                 rawupdates = []
1370         upd_commands = []
1371         for mykey, mystat, mycontent in rawupdates:
1372                 commands, errors = parse_updates(mycontent)
1373                 upd_commands.extend(commands)
1374         return upd_commands
1375
1376 def perform_global_updates(mycpv, mydb, mycommands):
1377         from portage.update import update_dbentries
1378         aux_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
1379         aux_dict = dict(izip(aux_keys, mydb.aux_get(mycpv, aux_keys)))
1380         updates = update_dbentries(mycommands, aux_dict)
1381         if updates:
1382                 mydb.aux_update(mycpv, updates)
1383
1384 def visible(pkgsettings, pkg):
1385         """
1386         Check if a package is visible. This can raise an InvalidDependString
1387         exception if LICENSE is invalid.
1388         TODO: optionally generate a list of masking reasons
1389         @rtype: Boolean
1390         @returns: True if the package is visible, False otherwise.
1391         """
1392         if not pkg.metadata["SLOT"]:
1393                 return False
1394         if not pkg.installed:
1395                 if not pkgsettings._accept_chost(pkg.cpv, pkg.metadata):
1396                         return False
1397         eapi = pkg.metadata["EAPI"]
1398         if not portage.eapi_is_supported(eapi):
1399                 return False
1400         if not pkg.installed:
1401                 if portage._eapi_is_deprecated(eapi):
1402                         return False
1403                 if pkgsettings._getMissingKeywords(pkg.cpv, pkg.metadata):
1404                         return False
1405         if pkgsettings._getMaskAtom(pkg.cpv, pkg.metadata):
1406                 return False
1407         if pkgsettings._getProfileMaskAtom(pkg.cpv, pkg.metadata):
1408                 return False
1409         try:
1410                 if pkgsettings._getMissingLicenses(pkg.cpv, pkg.metadata):
1411                         return False
1412         except portage.exception.InvalidDependString:
1413                 return False
1414         return True
1415
1416 def get_masking_status(pkg, pkgsettings, root_config):
1417
1418         mreasons = portage.getmaskingstatus(
1419                 pkg, settings=pkgsettings,
1420                 portdb=root_config.trees["porttree"].dbapi)
1421
1422         if not pkg.installed:
1423                 if not pkgsettings._accept_chost(pkg.cpv, pkg.metadata):
1424                         mreasons.append("CHOST: %s" % \
1425                                 pkg.metadata["CHOST"])
1426
1427         if not pkg.metadata["SLOT"]:
1428                 mreasons.append("invalid: SLOT is undefined")
1429
1430         return mreasons
1431
1432 def get_mask_info(root_config, cpv, pkgsettings,
1433         db, pkg_type, built, installed, db_keys):
1434         eapi_masked = False
1435         try:
1436                 metadata = dict(izip(db_keys,
1437                         db.aux_get(cpv, db_keys)))
1438         except KeyError:
1439                 metadata = None
1440         if metadata and not built:
1441                 pkgsettings.setcpv(cpv, mydb=metadata)
1442                 metadata["USE"] = pkgsettings["PORTAGE_USE"]
1443                 metadata['CHOST'] = pkgsettings.get('CHOST', '')
1444         if metadata is None:
1445                 mreasons = ["corruption"]
1446         else:
1447                 eapi = metadata['EAPI']
1448                 if eapi[:1] == '-':
1449                         eapi = eapi[1:]
1450                 if not portage.eapi_is_supported(eapi):
1451                         mreasons = ['EAPI %s' % eapi]
1452                 else:
1453                         pkg = Package(type_name=pkg_type, root_config=root_config,
1454                                 cpv=cpv, built=built, installed=installed, metadata=metadata)
1455                         mreasons = get_masking_status(pkg, pkgsettings, root_config)
1456         return metadata, mreasons
1457
1458 def show_masked_packages(masked_packages):
1459         shown_licenses = set()
1460         shown_comments = set()
1461         # Maybe there is both an ebuild and a binary. Only
1462         # show one of them to avoid redundant appearance.
1463         shown_cpvs = set()
1464         have_eapi_mask = False
1465         for (root_config, pkgsettings, cpv,
1466                 metadata, mreasons) in masked_packages:
1467                 if cpv in shown_cpvs:
1468                         continue
1469                 shown_cpvs.add(cpv)
1470                 comment, filename = None, None
1471                 if "package.mask" in mreasons:
1472                         comment, filename = \
1473                                 portage.getmaskingreason(
1474                                 cpv, metadata=metadata,
1475                                 settings=pkgsettings,
1476                                 portdb=root_config.trees["porttree"].dbapi,
1477                                 return_location=True)
1478                 missing_licenses = []
1479                 if metadata:
1480                         if not portage.eapi_is_supported(metadata["EAPI"]):
1481                                 have_eapi_mask = True
1482                         try:
1483                                 missing_licenses = \
1484                                         pkgsettings._getMissingLicenses(
1485                                                 cpv, metadata)
1486                         except portage.exception.InvalidDependString:
1487                                 # This will have already been reported
1488                                 # above via mreasons.
1489                                 pass
1490
1491                 print "- "+cpv+" (masked by: "+", ".join(mreasons)+")"
1492                 if comment and comment not in shown_comments:
1493                         print filename+":"
1494                         print comment
1495                         shown_comments.add(comment)
1496                 portdb = root_config.trees["porttree"].dbapi
1497                 for l in missing_licenses:
1498                         l_path = portdb.findLicensePath(l)
1499                         if l in shown_licenses:
1500                                 continue
1501                         msg = ("A copy of the '%s' license" + \
1502                         " is located at '%s'.") % (l, l_path)
1503                         print msg
1504                         print
1505                         shown_licenses.add(l)
1506         return have_eapi_mask
1507
1508 class Task(SlotObject):
1509         __slots__ = ("_hash_key", "_hash_value")
1510
1511         def _get_hash_key(self):
1512                 hash_key = getattr(self, "_hash_key", None)
1513                 if hash_key is None:
1514                         raise NotImplementedError(self)
1515                 return hash_key
1516
1517         def __eq__(self, other):
1518                 return self._get_hash_key() == other
1519
1520         def __ne__(self, other):
1521                 return self._get_hash_key() != other
1522
1523         def __hash__(self):
1524                 hash_value = getattr(self, "_hash_value", None)
1525                 if hash_value is None:
1526                         self._hash_value = hash(self._get_hash_key())
1527                 return self._hash_value
1528
1529         def __len__(self):
1530                 return len(self._get_hash_key())
1531
1532         def __getitem__(self, key):
1533                 return self._get_hash_key()[key]
1534
1535         def __iter__(self):
1536                 return iter(self._get_hash_key())
1537
1538         def __contains__(self, key):
1539                 return key in self._get_hash_key()
1540
1541         def __str__(self):
1542                 return str(self._get_hash_key())
1543
1544 class Blocker(Task):
1545
1546         __hash__ = Task.__hash__
1547         __slots__ = ("root", "atom", "cp", "eapi", "satisfied")
1548
1549         def __init__(self, **kwargs):
1550                 Task.__init__(self, **kwargs)
1551                 self.cp = portage.dep_getkey(self.atom)
1552
1553         def _get_hash_key(self):
1554                 hash_key = getattr(self, "_hash_key", None)
1555                 if hash_key is None:
1556                         self._hash_key = \
1557                                 ("blocks", self.root, self.atom, self.eapi)
1558                 return self._hash_key
1559
1560 class Package(Task):
1561
1562         __hash__ = Task.__hash__
1563         __slots__ = ("built", "cpv", "depth",
1564                 "installed", "metadata", "onlydeps", "operation",
1565                 "root_config", "type_name",
1566                 "category", "counter", "cp", "cpv_split",
1567                 "inherited", "iuse", "mtime",
1568                 "pf", "pv_split", "root", "slot", "slot_atom", "use")
1569
1570         metadata_keys = [
1571                 "CHOST", "COUNTER", "DEPEND", "EAPI",
1572                 "INHERITED", "IUSE", "KEYWORDS",
1573                 "LICENSE", "PDEPEND", "PROVIDE", "RDEPEND",
1574                 "repository", "PROPERTIES", "RESTRICT", "SLOT", "USE", "_mtime_"]
1575
1576         def __init__(self, **kwargs):
1577                 Task.__init__(self, **kwargs)
1578                 self.root = self.root_config.root
1579                 self.metadata = _PackageMetadataWrapper(self, self.metadata)
1580                 self.cp = portage.cpv_getkey(self.cpv)
1581                 slot = self.slot
1582                 if not slot:
1583                         # Avoid an InvalidAtom exception when creating slot_atom.
1584                         # This package instance will be masked due to empty SLOT.
1585                         slot = '0'
1586                 self.slot_atom = portage.dep.Atom("%s:%s" % (self.cp, slot))
1587                 self.category, self.pf = portage.catsplit(self.cpv)
1588                 self.cpv_split = portage.catpkgsplit(self.cpv)
1589                 self.pv_split = self.cpv_split[1:]
1590
1591         class _use(object):
1592
1593                 __slots__ = ("__weakref__", "enabled")
1594
1595                 def __init__(self, use):
1596                         self.enabled = frozenset(use)
1597
1598         class _iuse(object):
1599
1600                 __slots__ = ("__weakref__", "all", "enabled", "disabled", "iuse_implicit", "regex", "tokens")
1601
1602                 def __init__(self, tokens, iuse_implicit):
1603                         self.tokens = tuple(tokens)
1604                         self.iuse_implicit = iuse_implicit
1605                         enabled = []
1606                         disabled = []
1607                         other = []
1608                         for x in tokens:
1609                                 prefix = x[:1]
1610                                 if prefix == "+":
1611                                         enabled.append(x[1:])
1612                                 elif prefix == "-":
1613                                         disabled.append(x[1:])
1614                                 else:
1615                                         other.append(x)
1616                         self.enabled = frozenset(enabled)
1617                         self.disabled = frozenset(disabled)
1618                         self.all = frozenset(chain(enabled, disabled, other))
1619
1620                 def __getattribute__(self, name):
1621                         if name == "regex":
1622                                 try:
1623                                         return object.__getattribute__(self, "regex")
1624                                 except AttributeError:
1625                                         all = object.__getattribute__(self, "all")
1626                                         iuse_implicit = object.__getattribute__(self, "iuse_implicit")
1627                                         # Escape anything except ".*" which is supposed
1628                                         # to pass through from _get_implicit_iuse()
1629                                         regex = (re.escape(x) for x in chain(all, iuse_implicit))
1630                                         regex = "^(%s)$" % "|".join(regex)
1631                                         regex = regex.replace("\\.\\*", ".*")
1632                                         self.regex = re.compile(regex)
1633                         return object.__getattribute__(self, name)
1634
1635         def _get_hash_key(self):
1636                 hash_key = getattr(self, "_hash_key", None)
1637                 if hash_key is None:
1638                         if self.operation is None:
1639                                 self.operation = "merge"
1640                                 if self.onlydeps or self.installed:
1641                                         self.operation = "nomerge"
1642                         self._hash_key = \
1643                                 (self.type_name, self.root, self.cpv, self.operation)
1644                 return self._hash_key
1645
1646         def __lt__(self, other):
1647                 if other.cp != self.cp:
1648                         return False
1649                 if portage.pkgcmp(self.pv_split, other.pv_split) < 0:
1650                         return True
1651                 return False
1652
1653         def __le__(self, other):
1654                 if other.cp != self.cp:
1655                         return False
1656                 if portage.pkgcmp(self.pv_split, other.pv_split) <= 0:
1657                         return True
1658                 return False
1659
1660         def __gt__(self, other):
1661                 if other.cp != self.cp:
1662                         return False
1663                 if portage.pkgcmp(self.pv_split, other.pv_split) > 0:
1664                         return True
1665                 return False
1666
1667         def __ge__(self, other):
1668                 if other.cp != self.cp:
1669                         return False
1670                 if portage.pkgcmp(self.pv_split, other.pv_split) >= 0:
1671                         return True
1672                 return False
1673
1674 _all_metadata_keys = set(x for x in portage.auxdbkeys \
1675         if not x.startswith("UNUSED_"))
1676 _all_metadata_keys.discard("CDEPEND")
1677 _all_metadata_keys.update(Package.metadata_keys)
1678
1679 from portage.cache.mappings import slot_dict_class
1680 _PackageMetadataWrapperBase = slot_dict_class(_all_metadata_keys)
1681
1682 class _PackageMetadataWrapper(_PackageMetadataWrapperBase):
1683         """
1684         Detect metadata updates and synchronize Package attributes.
1685         """
1686
1687         __slots__ = ("_pkg",)
1688         _wrapped_keys = frozenset(
1689                 ["COUNTER", "INHERITED", "IUSE", "SLOT", "USE", "_mtime_"])
1690
1691         def __init__(self, pkg, metadata):
1692                 _PackageMetadataWrapperBase.__init__(self)
1693                 self._pkg = pkg
1694                 self.update(metadata)
1695
1696         def __setitem__(self, k, v):
1697                 _PackageMetadataWrapperBase.__setitem__(self, k, v)
1698                 if k in self._wrapped_keys:
1699                         getattr(self, "_set_" + k.lower())(k, v)
1700
1701         def _set_inherited(self, k, v):
1702                 if isinstance(v, basestring):
1703                         v = frozenset(v.split())
1704                 self._pkg.inherited = v
1705
1706         def _set_iuse(self, k, v):
1707                 self._pkg.iuse = self._pkg._iuse(
1708                         v.split(), self._pkg.root_config.iuse_implicit)
1709
1710         def _set_slot(self, k, v):
1711                 self._pkg.slot = v
1712
1713         def _set_use(self, k, v):
1714                 self._pkg.use = self._pkg._use(v.split())
1715
1716         def _set_counter(self, k, v):
1717                 if isinstance(v, basestring):
1718                         try:
1719                                 v = long(v.strip())
1720                         except ValueError:
1721                                 v = 0
1722                 self._pkg.counter = v
1723
1724         def _set__mtime_(self, k, v):
1725                 if isinstance(v, basestring):
1726                         try:
1727                                 v = long(v.strip())
1728                         except ValueError:
1729                                 v = 0
1730                 self._pkg.mtime = v
1731
1732 class EbuildFetchonly(SlotObject):
1733
1734         __slots__ = ("fetch_all", "pkg", "pretend", "settings")
1735
1736         def execute(self):
1737                 settings = self.settings
1738                 pkg = self.pkg
1739                 portdb = pkg.root_config.trees["porttree"].dbapi
1740                 ebuild_path = portdb.findname(pkg.cpv)
1741                 settings.setcpv(pkg)
1742                 debug = settings.get("PORTAGE_DEBUG") == "1"
1743                 use_cache = 1 # always true
1744                 portage.doebuild_environment(ebuild_path, "fetch",
1745                         settings["ROOT"], settings, debug, use_cache, portdb)
1746                 restrict_fetch = 'fetch' in settings['PORTAGE_RESTRICT'].split()
1747
1748                 if restrict_fetch:
1749                         rval = self._execute_with_builddir()
1750                 else:
1751                         rval = portage.doebuild(ebuild_path, "fetch",
1752                                 settings["ROOT"], settings, debug=debug,
1753                                 listonly=self.pretend, fetchonly=1, fetchall=self.fetch_all,
1754                                 mydbapi=portdb, tree="porttree")
1755
1756                         if rval != os.EX_OK:
1757                                 msg = "Fetch failed for '%s'" % (pkg.cpv,)
1758                                 eerror(msg, phase="unpack", key=pkg.cpv)
1759
1760                 return rval
1761
1762         def _execute_with_builddir(self):
1763                 # To spawn pkg_nofetch requires PORTAGE_BUILDDIR for
1764                 # ensuring sane $PWD (bug #239560) and storing elog
1765                 # messages. Use a private temp directory, in order
1766                 # to avoid locking the main one.
1767                 settings = self.settings
1768                 global_tmpdir = settings["PORTAGE_TMPDIR"]
1769                 from tempfile import mkdtemp
1770                 try:
1771                         private_tmpdir = mkdtemp("", "._portage_fetch_.", global_tmpdir)
1772                 except OSError, e:
1773                         if e.errno != portage.exception.PermissionDenied.errno:
1774                                 raise
1775                         raise portage.exception.PermissionDenied(global_tmpdir)
1776                 settings["PORTAGE_TMPDIR"] = private_tmpdir
1777                 settings.backup_changes("PORTAGE_TMPDIR")
1778                 try:
1779                         retval = self._execute()
1780                 finally:
1781                         settings["PORTAGE_TMPDIR"] = global_tmpdir
1782                         settings.backup_changes("PORTAGE_TMPDIR")
1783                         shutil.rmtree(private_tmpdir)
1784                 return retval
1785
1786         def _execute(self):
1787                 settings = self.settings
1788                 pkg = self.pkg
1789                 root_config = pkg.root_config
1790                 portdb = root_config.trees["porttree"].dbapi
1791                 ebuild_path = portdb.findname(pkg.cpv)
1792                 debug = settings.get("PORTAGE_DEBUG") == "1"
1793                 retval = portage.doebuild(ebuild_path, "fetch",
1794                         self.settings["ROOT"], self.settings, debug=debug,
1795                         listonly=self.pretend, fetchonly=1, fetchall=self.fetch_all,
1796                         mydbapi=portdb, tree="porttree")
1797
1798                 if retval != os.EX_OK:
1799                         msg = "Fetch failed for '%s'" % (pkg.cpv,)
1800                         eerror(msg, phase="unpack", key=pkg.cpv)
1801
1802                 portage.elog.elog_process(self.pkg.cpv, self.settings)
1803                 return retval
1804
1805 class PollConstants(object):
1806
1807         """
1808         Provides POLL* constants that are equivalent to those from the
1809         select module, for use by PollSelectAdapter.
1810         """
1811
1812         names = ("POLLIN", "POLLPRI", "POLLOUT", "POLLERR", "POLLHUP", "POLLNVAL")
1813         v = 1
1814         for k in names:
1815                 locals()[k] = getattr(select, k, v)
1816                 v *= 2
1817         del k, v
1818
1819 class AsynchronousTask(SlotObject):
1820         """
1821         Subclasses override _wait() and _poll() so that calls
1822         to public methods can be wrapped for implementing
1823         hooks such as exit listener notification.
1824
1825         Sublasses should call self.wait() to notify exit listeners after
1826         the task is complete and self.returncode has been set.
1827         """
1828
1829         __slots__ = ("background", "cancelled", "returncode") + \
1830                 ("_exit_listeners", "_exit_listener_stack", "_start_listeners")
1831
1832         def start(self):
1833                 """
1834                 Start an asynchronous task and then return as soon as possible.
1835                 """
1836                 self._start_hook()
1837                 self._start()
1838
1839         def _start(self):
1840                 raise NotImplementedError(self)
1841
1842         def isAlive(self):
1843                 return self.returncode is None
1844
1845         def poll(self):
1846                 self._wait_hook()
1847                 return self._poll()
1848
1849         def _poll(self):
1850                 return self.returncode
1851
1852         def wait(self):
1853                 if self.returncode is None:
1854                         self._wait()
1855                 self._wait_hook()
1856                 return self.returncode
1857
1858         def _wait(self):
1859                 return self.returncode
1860
1861         def cancel(self):
1862                 self.cancelled = True
1863                 self.wait()
1864
1865         def addStartListener(self, f):
1866                 """
1867                 The function will be called with one argument, a reference to self.
1868                 """
1869                 if self._start_listeners is None:
1870                         self._start_listeners = []
1871                 self._start_listeners.append(f)
1872
1873         def removeStartListener(self, f):
1874                 if self._start_listeners is None:
1875                         return
1876                 self._start_listeners.remove(f)
1877
1878         def _start_hook(self):
1879                 if self._start_listeners is not None:
1880                         start_listeners = self._start_listeners
1881                         self._start_listeners = None
1882
1883                         for f in start_listeners:
1884                                 f(self)
1885
1886         def addExitListener(self, f):
1887                 """
1888                 The function will be called with one argument, a reference to self.
1889                 """
1890                 if self._exit_listeners is None:
1891                         self._exit_listeners = []
1892                 self._exit_listeners.append(f)
1893
1894         def removeExitListener(self, f):
1895                 if self._exit_listeners is None:
1896                         if self._exit_listener_stack is not None:
1897                                 self._exit_listener_stack.remove(f)
1898                         return
1899                 self._exit_listeners.remove(f)
1900
1901         def _wait_hook(self):
1902                 """
1903                 Call this method after the task completes, just before returning
1904                 the returncode from wait() or poll(). This hook is
1905                 used to trigger exit listeners when the returncode first
1906                 becomes available.
1907                 """
1908                 if self.returncode is not None and \
1909                         self._exit_listeners is not None:
1910
1911                         # This prevents recursion, in case one of the
1912                         # exit handlers triggers this method again by
1913                         # calling wait(). Use a stack that gives
1914                         # removeExitListener() an opportunity to consume
1915                         # listeners from the stack, before they can get
1916                         # called below. This is necessary because a call
1917                         # to one exit listener may result in a call to
1918                         # removeExitListener() for another listener on
1919                         # the stack. That listener needs to be removed
1920                         # from the stack since it would be inconsistent
1921                         # to call it after it has been been passed into
1922                         # removeExitListener().
1923                         self._exit_listener_stack = self._exit_listeners
1924                         self._exit_listeners = None
1925
1926                         self._exit_listener_stack.reverse()
1927                         while self._exit_listener_stack:
1928                                 self._exit_listener_stack.pop()(self)
1929
1930 class AbstractPollTask(AsynchronousTask):
1931
1932         __slots__ = ("scheduler",) + \
1933                 ("_registered",)
1934
1935         _bufsize = 4096
1936         _exceptional_events = PollConstants.POLLERR | PollConstants.POLLNVAL
1937         _registered_events = PollConstants.POLLIN | PollConstants.POLLHUP | \
1938                 _exceptional_events
1939
1940         def _unregister(self):
1941                 raise NotImplementedError(self)
1942
1943         def _unregister_if_appropriate(self, event):
1944                 if self._registered:
1945                         if event & self._exceptional_events:
1946                                 self._unregister()
1947                                 self.cancel()
1948                         elif event & PollConstants.POLLHUP:
1949                                 self._unregister()
1950                                 self.wait()
1951
1952 class PipeReader(AbstractPollTask):
1953
1954         """
1955         Reads output from one or more files and saves it in memory,
1956         for retrieval via the getvalue() method. This is driven by
1957         the scheduler's poll() loop, so it runs entirely within the
1958         current process.
1959         """
1960
1961         __slots__ = ("input_files",) + \
1962                 ("_read_data", "_reg_ids")
1963
1964         def _start(self):
1965                 self._reg_ids = set()
1966                 self._read_data = []
1967                 for k, f in self.input_files.iteritems():
1968                         fcntl.fcntl(f.fileno(), fcntl.F_SETFL,
1969                                 fcntl.fcntl(f.fileno(), fcntl.F_GETFL) | os.O_NONBLOCK)
1970                         self._reg_ids.add(self.scheduler.register(f.fileno(),
1971                                 self._registered_events, self._output_handler))
1972                 self._registered = True
1973
1974         def isAlive(self):
1975                 return self._registered
1976
1977         def cancel(self):
1978                 if self.returncode is None:
1979                         self.returncode = 1
1980                         self.cancelled = True
1981                 self.wait()
1982
1983         def _wait(self):
1984                 if self.returncode is not None:
1985                         return self.returncode
1986
1987                 if self._registered:
1988                         self.scheduler.schedule(self._reg_ids)
1989                         self._unregister()
1990
1991                 self.returncode = os.EX_OK
1992                 return self.returncode
1993
1994         def getvalue(self):
1995                 """Retrieve the entire contents"""
1996                 if sys.hexversion >= 0x3000000:
1997                         return bytes().join(self._read_data)
1998                 return "".join(self._read_data)
1999
2000         def close(self):
2001                 """Free the memory buffer."""
2002                 self._read_data = None
2003
2004         def _output_handler(self, fd, event):
2005
2006                 if event & PollConstants.POLLIN:
2007
2008                         for f in self.input_files.itervalues():
2009                                 if fd == f.fileno():
2010                                         break
2011
2012                         buf = array.array('B')
2013                         try:
2014                                 buf.fromfile(f, self._bufsize)
2015                         except EOFError:
2016                                 pass
2017
2018                         if buf:
2019                                 self._read_data.append(buf.tostring())
2020                         else:
2021                                 self._unregister()
2022                                 self.wait()
2023
2024                 self._unregister_if_appropriate(event)
2025                 return self._registered
2026
2027         def _unregister(self):
2028                 """
2029                 Unregister from the scheduler and close open files.
2030                 """
2031
2032                 self._registered = False
2033
2034                 if self._reg_ids is not None:
2035                         for reg_id in self._reg_ids:
2036                                 self.scheduler.unregister(reg_id)
2037                         self._reg_ids = None
2038
2039                 if self.input_files is not None:
2040                         for f in self.input_files.itervalues():
2041                                 f.close()
2042                         self.input_files = None
2043
2044 class CompositeTask(AsynchronousTask):
2045
2046         __slots__ = ("scheduler",) + ("_current_task",)
2047
2048         def isAlive(self):
2049                 return self._current_task is not None
2050
2051         def cancel(self):
2052                 self.cancelled = True
2053                 if self._current_task is not None:
2054                         self._current_task.cancel()
2055
2056         def _poll(self):
2057                 """
2058                 This does a loop calling self._current_task.poll()
2059                 repeatedly as long as the value of self._current_task
2060                 keeps changing. It calls poll() a maximum of one time
2061                 for a given self._current_task instance. This is useful
2062                 since calling poll() on a task can trigger advance to
2063                 the next task could eventually lead to the returncode
2064                 being set in cases when polling only a single task would
2065                 not have the same effect.
2066                 """
2067
2068                 prev = None
2069                 while True:
2070                         task = self._current_task
2071                         if task is None or task is prev:
2072                                 # don't poll the same task more than once
2073                                 break
2074                         task.poll()
2075                         prev = task
2076
2077                 return self.returncode
2078
2079         def _wait(self):
2080
2081                 prev = None
2082                 while True:
2083                         task = self._current_task
2084                         if task is None:
2085                                 # don't wait for the same task more than once
2086                                 break
2087                         if task is prev:
2088                                 # Before the task.wait() method returned, an exit
2089                                 # listener should have set self._current_task to either
2090                                 # a different task or None. Something is wrong.
2091                                 raise AssertionError("self._current_task has not " + \
2092                                         "changed since calling wait", self, task)
2093                         task.wait()
2094                         prev = task
2095
2096                 return self.returncode
2097
2098         def _assert_current(self, task):
2099                 """
2100                 Raises an AssertionError if the given task is not the
2101                 same one as self._current_task. This can be useful
2102                 for detecting bugs.
2103                 """
2104                 if task is not self._current_task:
2105                         raise AssertionError("Unrecognized task: %s" % (task,))
2106
2107         def _default_exit(self, task):
2108                 """
2109                 Calls _assert_current() on the given task and then sets the
2110                 composite returncode attribute if task.returncode != os.EX_OK.
2111                 If the task failed then self._current_task will be set to None.
2112                 Subclasses can use this as a generic task exit callback.
2113
2114                 @rtype: int
2115                 @returns: The task.returncode attribute.
2116                 """
2117                 self._assert_current(task)
2118                 if task.returncode != os.EX_OK:
2119                         self.returncode = task.returncode
2120                         self._current_task = None
2121                 return task.returncode
2122
2123         def _final_exit(self, task):
2124                 """
2125                 Assumes that task is the final task of this composite task.
2126                 Calls _default_exit() and sets self.returncode to the task's
2127                 returncode and sets self._current_task to None.
2128                 """
2129                 self._default_exit(task)
2130                 self._current_task = None
2131                 self.returncode = task.returncode
2132                 return self.returncode
2133
2134         def _default_final_exit(self, task):
2135                 """
2136                 This calls _final_exit() and then wait().
2137
2138                 Subclasses can use this as a generic final task exit callback.
2139
2140                 """
2141                 self._final_exit(task)
2142                 return self.wait()
2143
2144         def _start_task(self, task, exit_handler):
2145                 """
2146                 Register exit handler for the given task, set it
2147                 as self._current_task, and call task.start().
2148
2149                 Subclasses can use this as a generic way to start
2150                 a task.
2151
2152                 """
2153                 task.addExitListener(exit_handler)
2154                 self._current_task = task
2155                 task.start()
2156
2157 class TaskSequence(CompositeTask):
2158         """
2159         A collection of tasks that executes sequentially. Each task
2160         must have a addExitListener() method that can be used as
2161         a means to trigger movement from one task to the next.
2162         """
2163
2164         __slots__ = ("_task_queue",)
2165
2166         def __init__(self, **kwargs):
2167                 AsynchronousTask.__init__(self, **kwargs)
2168                 self._task_queue = deque()
2169
2170         def add(self, task):
2171                 self._task_queue.append(task)
2172
2173         def _start(self):
2174                 self._start_next_task()
2175
2176         def cancel(self):
2177                 self._task_queue.clear()
2178                 CompositeTask.cancel(self)
2179
2180         def _start_next_task(self):
2181                 self._start_task(self._task_queue.popleft(),
2182                         self._task_exit_handler)
2183
2184         def _task_exit_handler(self, task):
2185                 if self._default_exit(task) != os.EX_OK:
2186                         self.wait()
2187                 elif self._task_queue:
2188                         self._start_next_task()
2189                 else:
2190                         self._final_exit(task)
2191                         self.wait()
2192
2193 class SubProcess(AbstractPollTask):
2194
2195         __slots__ = ("pid",) + \
2196                 ("_files", "_reg_id")
2197
2198         # A file descriptor is required for the scheduler to monitor changes from
2199         # inside a poll() loop. When logging is not enabled, create a pipe just to
2200         # serve this purpose alone.
2201         _dummy_pipe_fd = 9
2202
2203         def _poll(self):
2204                 if self.returncode is not None:
2205                         return self.returncode
2206                 if self.pid is None:
2207                         return self.returncode
2208                 if self._registered:
2209                         return self.returncode
2210
2211                 try:
2212                         retval = os.waitpid(self.pid, os.WNOHANG)
2213                 except OSError, e:
2214                         if e.errno != errno.ECHILD:
2215                                 raise
2216                         del e
2217                         retval = (self.pid, 1)
2218
2219                 if retval == (0, 0):
2220                         return None
2221                 self._set_returncode(retval)
2222                 return self.returncode
2223
2224         def cancel(self):
2225                 if self.isAlive():
2226                         try:
2227                                 os.kill(self.pid, signal.SIGTERM)
2228                         except OSError, e:
2229                                 if e.errno != errno.ESRCH:
2230                                         raise
2231                                 del e
2232
2233                 self.cancelled = True
2234                 if self.pid is not None:
2235                         self.wait()
2236                 return self.returncode
2237
2238         def isAlive(self):
2239                 return self.pid is not None and \
2240                         self.returncode is None
2241
2242         def _wait(self):
2243
2244                 if self.returncode is not None:
2245                         return self.returncode
2246
2247                 if self._registered:
2248                         self.scheduler.schedule(self._reg_id)
2249                         self._unregister()
2250                         if self.returncode is not None:
2251                                 return self.returncode
2252
2253                 try:
2254                         wait_retval = os.waitpid(self.pid, 0)
2255                 except OSError, e:
2256                         if e.errno != errno.ECHILD:
2257                                 raise
2258                         del e
2259                         self._set_returncode((self.pid, 1))
2260                 else:
2261                         self._set_returncode(wait_retval)
2262
2263                 return self.returncode
2264
2265         def _unregister(self):
2266                 """
2267                 Unregister from the scheduler and close open files.
2268                 """
2269
2270                 self._registered = False
2271
2272                 if self._reg_id is not None:
2273                         self.scheduler.unregister(self._reg_id)
2274                         self._reg_id = None
2275
2276                 if self._files is not None:
2277                         for f in self._files.itervalues():
2278                                 f.close()
2279                         self._files = None
2280
2281         def _set_returncode(self, wait_retval):
2282
2283                 retval = wait_retval[1]
2284
2285                 if retval != os.EX_OK:
2286                         if retval & 0xff:
2287                                 retval = (retval & 0xff) << 8
2288                         else:
2289                                 retval = retval >> 8
2290
2291                 self.returncode = retval
2292
2293 class SpawnProcess(SubProcess):
2294
2295         """
2296         Constructor keyword args are passed into portage.process.spawn().
2297         The required "args" keyword argument will be passed as the first
2298         spawn() argument.
2299         """
2300
2301         _spawn_kwarg_names = ("env", "opt_name", "fd_pipes",
2302                 "uid", "gid", "groups", "umask", "logfile",
2303                 "path_lookup", "pre_exec")
2304
2305         __slots__ = ("args",) + \
2306                 _spawn_kwarg_names
2307
2308         _file_names = ("log", "process", "stdout")
2309         _files_dict = slot_dict_class(_file_names, prefix="")
2310
2311         def _start(self):
2312
2313                 if self.cancelled:
2314                         return
2315
2316                 if self.fd_pipes is None:
2317                         self.fd_pipes = {}
2318                 fd_pipes = self.fd_pipes
2319                 fd_pipes.setdefault(0, sys.stdin.fileno())
2320                 fd_pipes.setdefault(1, sys.stdout.fileno())
2321                 fd_pipes.setdefault(2, sys.stderr.fileno())
2322
2323                 # flush any pending output
2324                 for fd in fd_pipes.itervalues():
2325                         if fd == sys.stdout.fileno():
2326                                 sys.stdout.flush()
2327                         if fd == sys.stderr.fileno():
2328                                 sys.stderr.flush()
2329
2330                 logfile = self.logfile
2331                 self._files = self._files_dict()
2332                 files = self._files
2333
2334                 master_fd, slave_fd = self._pipe(fd_pipes)
2335                 fcntl.fcntl(master_fd, fcntl.F_SETFL,
2336                         fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
2337
2338                 null_input = None
2339                 fd_pipes_orig = fd_pipes.copy()
2340                 if self.background:
2341                         # TODO: Use job control functions like tcsetpgrp() to control
2342                         # access to stdin. Until then, use /dev/null so that any
2343                         # attempts to read from stdin will immediately return EOF
2344                         # instead of blocking indefinitely.
2345                         null_input = open('/dev/null', 'rb')
2346                         fd_pipes[0] = null_input.fileno()
2347                 else:
2348                         fd_pipes[0] = fd_pipes_orig[0]
2349
2350                 files.process = os.fdopen(master_fd, 'rb')
2351                 if logfile is not None:
2352
2353                         fd_pipes[1] = slave_fd
2354                         fd_pipes[2] = slave_fd
2355
2356                         files.log = open(logfile, mode='ab')
2357                         portage.util.apply_secpass_permissions(logfile,
2358                                 uid=portage.portage_uid, gid=portage.portage_gid,
2359                                 mode=0660)
2360
2361                         if not self.background:
2362                                 files.stdout = os.fdopen(os.dup(fd_pipes_orig[1]), 'wb')
2363
2364                         output_handler = self._output_handler
2365
2366                 else:
2367
2368                         # Create a dummy pipe so the scheduler can monitor
2369                         # the process from inside a poll() loop.
2370                         fd_pipes[self._dummy_pipe_fd] = slave_fd
2371                         if self.background:
2372                                 fd_pipes[1] = slave_fd
2373                                 fd_pipes[2] = slave_fd
2374                         output_handler = self._dummy_handler
2375
2376                 kwargs = {}
2377                 for k in self._spawn_kwarg_names:
2378                         v = getattr(self, k)
2379                         if v is not None:
2380                                 kwargs[k] = v
2381
2382                 kwargs["fd_pipes"] = fd_pipes
2383                 kwargs["returnpid"] = True
2384                 kwargs.pop("logfile", None)
2385
2386                 self._reg_id = self.scheduler.register(files.process.fileno(),
2387                         self._registered_events, output_handler)
2388                 self._registered = True
2389
2390                 retval = self._spawn(self.args, **kwargs)
2391
2392                 os.close(slave_fd)
2393                 if null_input is not None:
2394                         null_input.close()
2395
2396                 if isinstance(retval, int):
2397                         # spawn failed
2398                         self._unregister()
2399                         self.returncode = retval
2400                         self.wait()
2401                         return
2402
2403                 self.pid = retval[0]
2404                 portage.process.spawned_pids.remove(self.pid)
2405
2406         def _pipe(self, fd_pipes):
2407                 """
2408                 @type fd_pipes: dict
2409                 @param fd_pipes: pipes from which to copy terminal size if desired.
2410                 """
2411                 return os.pipe()
2412
2413         def _spawn(self, args, **kwargs):
2414                 return portage.process.spawn(args, **kwargs)
2415
2416         def _output_handler(self, fd, event):
2417
2418                 if event & PollConstants.POLLIN:
2419
2420                         files = self._files
2421                         buf = array.array('B')
2422                         try:
2423                                 buf.fromfile(files.process, self._bufsize)
2424                         except EOFError:
2425                                 pass
2426
2427                         if buf:
2428                                 if not self.background:
2429                                         buf.tofile(files.stdout)
2430                                         files.stdout.flush()
2431                                 buf.tofile(files.log)
2432                                 files.log.flush()
2433                         else:
2434                                 self._unregister()
2435                                 self.wait()
2436
2437                 self._unregister_if_appropriate(event)
2438                 return self._registered
2439
2440         def _dummy_handler(self, fd, event):
2441                 """
2442                 This method is mainly interested in detecting EOF, since
2443                 the only purpose of the pipe is to allow the scheduler to
2444                 monitor the process from inside a poll() loop.
2445                 """
2446
2447                 if event & PollConstants.POLLIN:
2448
2449                         buf = array.array('B')
2450                         try:
2451                                 buf.fromfile(self._files.process, self._bufsize)
2452                         except EOFError:
2453                                 pass
2454
2455                         if buf:
2456                                 pass
2457                         else:
2458                                 self._unregister()
2459                                 self.wait()
2460
2461                 self._unregister_if_appropriate(event)
2462                 return self._registered
2463
2464 class MiscFunctionsProcess(SpawnProcess):
2465         """
2466         Spawns misc-functions.sh with an existing ebuild environment.
2467         """
2468
2469         __slots__ = ("commands", "phase", "pkg", "settings")
2470
2471         def _start(self):
2472                 settings = self.settings
2473                 settings.pop("EBUILD_PHASE", None)
2474                 portage_bin_path = settings["PORTAGE_BIN_PATH"]
2475                 misc_sh_binary = os.path.join(portage_bin_path,
2476                         os.path.basename(portage.const.MISC_SH_BINARY))
2477
2478                 self.args = [portage._shell_quote(misc_sh_binary)] + self.commands
2479                 self.logfile = settings.get("PORTAGE_LOG_FILE")
2480
2481                 portage._doebuild_exit_status_unlink(
2482                         settings.get("EBUILD_EXIT_STATUS_FILE"))
2483
2484                 SpawnProcess._start(self)
2485
2486         def _spawn(self, args, **kwargs):
2487                 settings = self.settings
2488                 debug = settings.get("PORTAGE_DEBUG") == "1"
2489                 return portage.spawn(" ".join(args), settings,
2490                         debug=debug, **kwargs)
2491
2492         def _set_returncode(self, wait_retval):
2493                 SpawnProcess._set_returncode(self, wait_retval)
2494                 self.returncode = portage._doebuild_exit_status_check_and_log(
2495                         self.settings, self.phase, self.returncode)
2496
2497 class EbuildFetcher(SpawnProcess):
2498
2499         __slots__ = ("config_pool", "fetchonly", "fetchall", "pkg", "prefetch") + \
2500                 ("_build_dir",)
2501
2502         def _start(self):
2503
2504                 root_config = self.pkg.root_config
2505                 portdb = root_config.trees["porttree"].dbapi
2506                 ebuild_path = portdb.findname(self.pkg.cpv)
2507                 settings = self.config_pool.allocate()
2508                 settings.setcpv(self.pkg)
2509
2510                 # In prefetch mode, logging goes to emerge-fetch.log and the builddir
2511                 # should not be touched since otherwise it could interfere with
2512                 # another instance of the same cpv concurrently being built for a
2513                 # different $ROOT (currently, builds only cooperate with prefetchers
2514                 # that are spawned for the same $ROOT).
2515                 if not self.prefetch:
2516                         self._build_dir = EbuildBuildDir(pkg=self.pkg, settings=settings)
2517                         self._build_dir.lock()
2518                         self._build_dir.clean_log()
2519                         portage.prepare_build_dirs(self.pkg.root, self._build_dir.settings, 0)
2520                         if self.logfile is None:
2521                                 self.logfile = settings.get("PORTAGE_LOG_FILE")
2522
2523                 phase = "fetch"
2524                 if self.fetchall:
2525                         phase = "fetchall"
2526
2527                 # If any incremental variables have been overridden
2528                 # via the environment, those values need to be passed
2529                 # along here so that they are correctly considered by
2530                 # the config instance in the subproccess.
2531                 fetch_env = os.environ.copy()
2532
2533                 nocolor = settings.get("NOCOLOR")
2534                 if nocolor is not None:
2535                         fetch_env["NOCOLOR"] = nocolor
2536
2537                 fetch_env["PORTAGE_NICENESS"] = "0"
2538                 if self.prefetch:
2539                         fetch_env["PORTAGE_PARALLEL_FETCHONLY"] = "1"
2540
2541                 ebuild_binary = os.path.join(
2542                         settings["PORTAGE_BIN_PATH"], "ebuild")
2543
2544                 fetch_args = [ebuild_binary, ebuild_path, phase]
2545                 debug = settings.get("PORTAGE_DEBUG") == "1"
2546                 if debug:
2547                         fetch_args.append("--debug")
2548
2549                 self.args = fetch_args
2550                 self.env = fetch_env
2551                 SpawnProcess._start(self)
2552
2553         def _pipe(self, fd_pipes):
2554                 """When appropriate, use a pty so that fetcher progress bars,
2555                 like wget has, will work properly."""
2556                 if self.background or not sys.stdout.isatty():
2557                         # When the output only goes to a log file,
2558                         # there's no point in creating a pty.
2559                         return os.pipe()
2560                 stdout_pipe = fd_pipes.get(1)
2561                 got_pty, master_fd, slave_fd = \
2562                         portage._create_pty_or_pipe(copy_term_size=stdout_pipe)
2563                 return (master_fd, slave_fd)
2564
2565         def _set_returncode(self, wait_retval):
2566                 SpawnProcess._set_returncode(self, wait_retval)
2567                 # Collect elog messages that might have been
2568                 # created by the pkg_nofetch phase.
2569                 if self._build_dir is not None:
2570                         # Skip elog messages for prefetch, in order to avoid duplicates.
2571                         if not self.prefetch and self.returncode != os.EX_OK:
2572                                 elog_out = None
2573                                 if self.logfile is not None:
2574                                         if self.background:
2575                                                 elog_out = open(self.logfile, 'a')
2576                                 msg = "Fetch failed for '%s'" % (self.pkg.cpv,)
2577                                 if self.logfile is not None:
2578                                         msg += ", Log file:"
2579                                 eerror(msg, phase="unpack", key=self.pkg.cpv, out=elog_out)
2580                                 if self.logfile is not None:
2581                                         eerror(" '%s'" % (self.logfile,),
2582                                                 phase="unpack", key=self.pkg.cpv, out=elog_out)
2583                                 if elog_out is not None:
2584                                         elog_out.close()
2585                         if not self.prefetch:
2586                                 portage.elog.elog_process(self.pkg.cpv, self._build_dir.settings)
2587                         features = self._build_dir.settings.features
2588                         if self.returncode == os.EX_OK:
2589                                 self._build_dir.clean_log()
2590                         self._build_dir.unlock()
2591                         self.config_pool.deallocate(self._build_dir.settings)
2592                         self._build_dir = None
2593
2594 class EbuildBuildDir(SlotObject):
2595
2596         __slots__ = ("dir_path", "pkg", "settings",
2597                 "locked", "_catdir", "_lock_obj")
2598
2599         def __init__(self, **kwargs):
2600                 SlotObject.__init__(self, **kwargs)
2601                 self.locked = False
2602
2603         def lock(self):
2604                 """
2605                 This raises an AlreadyLocked exception if lock() is called
2606                 while a lock is already held. In order to avoid this, call
2607                 unlock() or check whether the "locked" attribute is True
2608                 or False before calling lock().
2609                 """
2610                 if self._lock_obj is not None:
2611                         raise self.AlreadyLocked((self._lock_obj,))
2612
2613                 dir_path = self.dir_path
2614                 if dir_path is None:
2615                         root_config = self.pkg.root_config
2616                         portdb = root_config.trees["porttree"].dbapi
2617                         ebuild_path = portdb.findname(self.pkg.cpv)
2618                         settings = self.settings
2619                         settings.setcpv(self.pkg)
2620                         debug = settings.get("PORTAGE_DEBUG") == "1"
2621                         use_cache = 1 # always true
2622                         portage.doebuild_environment(ebuild_path, "setup", root_config.root,
2623                                 self.settings, debug, use_cache, portdb)
2624                         dir_path = self.settings["PORTAGE_BUILDDIR"]
2625
2626                 catdir = os.path.dirname(dir_path)
2627                 self._catdir = catdir
2628
2629                 portage.util.ensure_dirs(os.path.dirname(catdir),
2630                         gid=portage.portage_gid,
2631                         mode=070, mask=0)
2632                 catdir_lock = None
2633                 try:
2634                         catdir_lock = portage.locks.lockdir(catdir)
2635                         portage.util.ensure_dirs(catdir,
2636                                 gid=portage.portage_gid,
2637                                 mode=070, mask=0)
2638                         self._lock_obj = portage.locks.lockdir(dir_path)
2639                 finally:
2640                         self.locked = self._lock_obj is not None
2641                         if catdir_lock is not None:
2642                                 portage.locks.unlockdir(catdir_lock)
2643
2644         def clean_log(self):
2645                 """Discard existing log."""
2646                 settings = self.settings
2647
2648                 for x in ('.logid', 'temp/build.log'):
2649                         try:
2650                                 os.unlink(os.path.join(settings["PORTAGE_BUILDDIR"], x))
2651                         except OSError:
2652                                 pass
2653
2654         def unlock(self):
2655                 if self._lock_obj is None:
2656                         return
2657
2658                 portage.locks.unlockdir(self._lock_obj)
2659                 self._lock_obj = None
2660                 self.locked = False
2661
2662                 catdir = self._catdir
2663                 catdir_lock = None
2664                 try:
2665                         catdir_lock = portage.locks.lockdir(catdir)
2666                 finally:
2667                         if catdir_lock:
2668                                 try:
2669                                         os.rmdir(catdir)
2670                                 except OSError, e:
2671                                         if e.errno not in (errno.ENOENT,
2672                                                 errno.ENOTEMPTY, errno.EEXIST):
2673                                                 raise
2674                                         del e
2675                                 portage.locks.unlockdir(catdir_lock)
2676
2677         class AlreadyLocked(portage.exception.PortageException):
2678                 pass
2679
2680 class EbuildBuild(CompositeTask):
2681
2682         __slots__ = ("args_set", "config_pool", "find_blockers",
2683                 "ldpath_mtimes", "logger", "opts", "pkg", "pkg_count",
2684                 "prefetcher", "settings", "world_atom") + \
2685                 ("_build_dir", "_buildpkg", "_ebuild_path", "_issyspkg", "_tree")
2686
2687         def _start(self):
2688
2689                 logger = self.logger
2690                 opts = self.opts
2691                 pkg = self.pkg
2692                 settings = self.settings
2693                 world_atom = self.world_atom
2694                 root_config = pkg.root_config
2695                 tree = "porttree"
2696                 self._tree = tree
2697                 portdb = root_config.trees[tree].dbapi
2698                 settings.setcpv(pkg)
2699                 settings.configdict["pkg"]["EMERGE_FROM"] = pkg.type_name
2700                 ebuild_path = portdb.findname(self.pkg.cpv)
2701                 self._ebuild_path = ebuild_path
2702
2703                 prefetcher = self.prefetcher
2704                 if prefetcher is None:
2705                         pass
2706                 elif not prefetcher.isAlive():
2707                         prefetcher.cancel()
2708                 elif prefetcher.poll() is None:
2709
2710                         waiting_msg = "Fetching files " + \
2711                                 "in the background. " + \
2712                                 "To view fetch progress, run `tail -f " + \
2713                                 "/var/log/emerge-fetch.log` in another " + \
2714                                 "terminal."
2715                         msg_prefix = colorize("GOOD", " * ")
2716                         from textwrap import wrap
2717                         waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
2718                                 for line in wrap(waiting_msg, 65))
2719                         if not self.background:
2720                                 writemsg(waiting_msg, noiselevel=-1)
2721
2722                         self._current_task = prefetcher
2723                         prefetcher.addExitListener(self._prefetch_exit)
2724                         return
2725
2726                 self._prefetch_exit(prefetcher)
2727
2728         def _prefetch_exit(self, prefetcher):
2729
2730                 opts = self.opts
2731                 pkg = self.pkg
2732                 settings = self.settings
2733
2734                 if opts.fetchonly:
2735                                 fetcher = EbuildFetchonly(
2736                                         fetch_all=opts.fetch_all_uri,
2737                                         pkg=pkg, pretend=opts.pretend,
2738                                         settings=settings)
2739                                 retval = fetcher.execute()
2740                                 self.returncode = retval
2741                                 self.wait()
2742                                 return
2743
2744                 fetcher = EbuildFetcher(config_pool=self.config_pool,
2745                         fetchall=opts.fetch_all_uri,
2746                         fetchonly=opts.fetchonly,
2747                         background=self.background,
2748                         pkg=pkg, scheduler=self.scheduler)
2749
2750                 self._start_task(fetcher, self._fetch_exit)
2751
2752         def _fetch_exit(self, fetcher):
2753                 opts = self.opts
2754                 pkg = self.pkg
2755
2756                 fetch_failed = False
2757                 if opts.fetchonly:
2758                         fetch_failed = self._final_exit(fetcher) != os.EX_OK
2759                 else:
2760                         fetch_failed = self._default_exit(fetcher) != os.EX_OK
2761
2762                 if fetch_failed and fetcher.logfile is not None and \
2763                         os.path.exists(fetcher.logfile):
2764                         self.settings["PORTAGE_LOG_FILE"] = fetcher.logfile
2765
2766                 if not fetch_failed and fetcher.logfile is not None:
2767                         # Fetch was successful, so remove the fetch log.
2768                         try:
2769                                 os.unlink(fetcher.logfile)
2770                         except OSError:
2771                                 pass
2772
2773                 if fetch_failed or opts.fetchonly:
2774                         self.wait()
2775                         return
2776
2777                 logger = self.logger
2778                 opts = self.opts
2779                 pkg_count = self.pkg_count
2780                 scheduler = self.scheduler
2781                 settings = self.settings
2782                 features = settings.features
2783                 ebuild_path = self._ebuild_path
2784                 system_set = pkg.root_config.sets["system"]
2785
2786                 self._build_dir = EbuildBuildDir(pkg=pkg, settings=settings)
2787                 self._build_dir.lock()
2788
2789                 # Cleaning is triggered before the setup
2790                 # phase, in portage.doebuild().
2791                 msg = " === (%s of %s) Cleaning (%s::%s)" % \
2792                         (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2793                 short_msg = "emerge: (%s of %s) %s Clean" % \
2794                         (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2795                 logger.log(msg, short_msg=short_msg)
2796
2797                 #buildsyspkg: Check if we need to _force_ binary package creation
2798                 self._issyspkg = "buildsyspkg" in features and \
2799                                 system_set.findAtomForPackage(pkg) and \
2800                                 not opts.buildpkg
2801
2802                 if opts.buildpkg or self._issyspkg:
2803
2804                         self._buildpkg = True
2805
2806                         msg = " === (%s of %s) Compiling/Packaging (%s::%s)" % \
2807                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2808                         short_msg = "emerge: (%s of %s) %s Compile" % \
2809                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2810                         logger.log(msg, short_msg=short_msg)
2811
2812                 else:
2813                         msg = " === (%s of %s) Compiling/Merging (%s::%s)" % \
2814                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2815                         short_msg = "emerge: (%s of %s) %s Compile" % \
2816                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2817                         logger.log(msg, short_msg=short_msg)
2818
2819                 build = EbuildExecuter(background=self.background, pkg=pkg,
2820                         scheduler=scheduler, settings=settings)
2821                 self._start_task(build, self._build_exit)
2822
2823         def _unlock_builddir(self):
2824                 portage.elog.elog_process(self.pkg.cpv, self.settings)
2825                 self._build_dir.unlock()
2826
2827         def _build_exit(self, build):
2828                 if self._default_exit(build) != os.EX_OK:
2829                         self._unlock_builddir()
2830                         self.wait()
2831                         return
2832
2833                 opts = self.opts
2834                 buildpkg = self._buildpkg
2835
2836                 if not buildpkg:
2837                         self._final_exit(build)
2838                         self.wait()
2839                         return
2840
2841                 if self._issyspkg:
2842                         msg = ">>> This is a system package, " + \
2843                                 "let's pack a rescue tarball.\n"
2844
2845                         log_path = self.settings.get("PORTAGE_LOG_FILE")
2846                         if log_path is not None:
2847                                 log_file = open(log_path, 'a')
2848                                 try:
2849                                         log_file.write(msg)
2850                                 finally:
2851                                         log_file.close()
2852
2853                         if not self.background:
2854                                 portage.writemsg_stdout(msg, noiselevel=-1)
2855
2856                 packager = EbuildBinpkg(background=self.background, pkg=self.pkg,
2857                         scheduler=self.scheduler, settings=self.settings)
2858
2859                 self._start_task(packager, self._buildpkg_exit)
2860
2861         def _buildpkg_exit(self, packager):
2862                 """
2863                 Released build dir lock when there is a failure or
2864                 when in buildpkgonly mode. Otherwise, the lock will
2865                 be released when merge() is called.
2866                 """
2867
2868                 if self._default_exit(packager) != os.EX_OK:
2869                         self._unlock_builddir()
2870                         self.wait()
2871                         return
2872
2873                 if self.opts.buildpkgonly:
2874                         # Need to call "clean" phase for buildpkgonly mode
2875                         portage.elog.elog_process(self.pkg.cpv, self.settings)
2876                         phase = "clean"
2877                         clean_phase = EbuildPhase(background=self.background,
2878                                 pkg=self.pkg, phase=phase,
2879                                 scheduler=self.scheduler, settings=self.settings,
2880                                 tree=self._tree)
2881                         self._start_task(clean_phase, self._clean_exit)
2882                         return
2883
2884                 # Continue holding the builddir lock until
2885                 # after the package has been installed.
2886                 self._current_task = None
2887                 self.returncode = packager.returncode
2888                 self.wait()
2889
2890         def _clean_exit(self, clean_phase):
2891                 if self._final_exit(clean_phase) != os.EX_OK or \
2892                         self.opts.buildpkgonly:
2893                         self._unlock_builddir()
2894                 self.wait()
2895
2896         def install(self):
2897                 """
2898                 Install the package and then clean up and release locks.
2899                 Only call this after the build has completed successfully
2900                 and neither fetchonly nor buildpkgonly mode are enabled.
2901                 """
2902
2903                 find_blockers = self.find_blockers
2904                 ldpath_mtimes = self.ldpath_mtimes
2905                 logger = self.logger
2906                 pkg = self.pkg
2907                 pkg_count = self.pkg_count
2908                 settings = self.settings
2909                 world_atom = self.world_atom
2910                 ebuild_path = self._ebuild_path
2911                 tree = self._tree
2912
2913                 merge = EbuildMerge(find_blockers=self.find_blockers,
2914                         ldpath_mtimes=ldpath_mtimes, logger=logger, pkg=pkg,
2915                         pkg_count=pkg_count, pkg_path=ebuild_path,
2916                         scheduler=self.scheduler,
2917                         settings=settings, tree=tree, world_atom=world_atom)
2918
2919                 msg = " === (%s of %s) Merging (%s::%s)" % \
2920                         (pkg_count.curval, pkg_count.maxval,
2921                         pkg.cpv, ebuild_path)
2922                 short_msg = "emerge: (%s of %s) %s Merge" % \
2923                         (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2924                 logger.log(msg, short_msg=short_msg)
2925
2926                 try:
2927                         rval = merge.execute()
2928                 finally:
2929                         self._unlock_builddir()
2930
2931                 return rval
2932
2933 class EbuildExecuter(CompositeTask):
2934
2935         __slots__ = ("pkg", "scheduler", "settings") + ("_tree",)
2936
2937         _phases = ("prepare", "configure", "compile", "test", "install")
2938
2939         _live_eclasses = frozenset([
2940                 "bzr",
2941                 "cvs",
2942                 "darcs",
2943                 "git",
2944                 "mercurial",
2945                 "subversion"
2946         ])
2947
2948         def _start(self):
2949                 self._tree = "porttree"
2950                 pkg = self.pkg
2951                 phase = "clean"
2952                 clean_phase = EbuildPhase(background=self.background, pkg=pkg, phase=phase,
2953                         scheduler=self.scheduler, settings=self.settings, tree=self._tree)
2954                 self._start_task(clean_phase, self._clean_phase_exit)
2955
2956         def _clean_phase_exit(self, clean_phase):
2957
2958                 if self._default_exit(clean_phase) != os.EX_OK:
2959                         self.wait()
2960                         return
2961
2962                 pkg = self.pkg
2963                 scheduler = self.scheduler
2964                 settings = self.settings
2965                 cleanup = 1
2966
2967                 # This initializes PORTAGE_LOG_FILE.
2968                 portage.prepare_build_dirs(pkg.root, settings, cleanup)
2969
2970                 setup_phase = EbuildPhase(background=self.background,
2971                         pkg=pkg, phase="setup", scheduler=scheduler,
2972                         settings=settings, tree=self._tree)
2973
2974                 setup_phase.addExitListener(self._setup_exit)
2975                 self._current_task = setup_phase
2976                 self.scheduler.scheduleSetup(setup_phase)
2977
2978         def _setup_exit(self, setup_phase):
2979
2980                 if self._default_exit(setup_phase) != os.EX_OK:
2981                         self.wait()
2982                         return
2983
2984                 unpack_phase = EbuildPhase(background=self.background,
2985                         pkg=self.pkg, phase="unpack", scheduler=self.scheduler,
2986                         settings=self.settings, tree=self._tree)
2987
2988                 if self._live_eclasses.intersection(self.pkg.inherited):
2989                         # Serialize $DISTDIR access for live ebuilds since
2990                         # otherwise they can interfere with eachother.
2991
2992                         unpack_phase.addExitListener(self._unpack_exit)
2993                         self._current_task = unpack_phase
2994                         self.scheduler.scheduleUnpack(unpack_phase)
2995
2996                 else:
2997                         self._start_task(unpack_phase, self._unpack_exit)
2998
2999         def _unpack_exit(self, unpack_phase):
3000
3001                 if self._default_exit(unpack_phase) != os.EX_OK:
3002                         self.wait()
3003                         return
3004
3005                 ebuild_phases = TaskSequence(scheduler=self.scheduler)
3006
3007                 pkg = self.pkg
3008                 phases = self._phases
3009                 eapi = pkg.metadata["EAPI"]
3010                 if eapi in ("0", "1"):
3011                         # skip src_prepare and src_configure
3012                         phases = phases[2:]
3013
3014                 for phase in phases:
3015                         ebuild_phases.add(EbuildPhase(background=self.background,
3016                                 pkg=self.pkg, phase=phase, scheduler=self.scheduler,
3017                                 settings=self.settings, tree=self._tree))
3018
3019                 self._start_task(ebuild_phases, self._default_final_exit)
3020
3021 class EbuildMetadataPhase(SubProcess):
3022
3023         """
3024         Asynchronous interface for the ebuild "depend" phase which is
3025         used to extract metadata from the ebuild.
3026         """
3027
3028         __slots__ = ("cpv", "ebuild_path", "fd_pipes", "metadata_callback",
3029                 "ebuild_mtime", "metadata", "portdb", "repo_path", "settings") + \
3030                 ("_raw_metadata",)
3031
3032         _file_names = ("ebuild",)
3033         _files_dict = slot_dict_class(_file_names, prefix="")
3034         _metadata_fd = 9
3035
3036         def _start(self):
3037                 settings = self.settings
3038                 settings.setcpv(self.cpv)
3039                 ebuild_path = self.ebuild_path
3040
3041                 eapi = None
3042                 if 'parse-eapi-glep-55' in settings.features:
3043                         pf, eapi = portage._split_ebuild_name_glep55(
3044                                 os.path.basename(ebuild_path))
3045                 if eapi is None and \
3046                         'parse-eapi-ebuild-head' in settings.features:
3047                         eapi = portage._parse_eapi_ebuild_head(codecs.open(ebuild_path,
3048                                 mode='r', encoding='utf_8', errors='replace'))
3049
3050                 if eapi is not None:
3051                         if not portage.eapi_is_supported(eapi):
3052                                 self.metadata_callback(self.cpv, self.ebuild_path,
3053                                         self.repo_path, {'EAPI' : eapi}, self.ebuild_mtime)
3054                                 self.returncode = os.EX_OK
3055                                 self.wait()
3056                                 return
3057
3058                         settings.configdict['pkg']['EAPI'] = eapi
3059
3060                 debug = settings.get("PORTAGE_DEBUG") == "1"
3061                 master_fd = None
3062                 slave_fd = None
3063                 fd_pipes = None
3064                 if self.fd_pipes is not None:
3065                         fd_pipes = self.fd_pipes.copy()
3066                 else:
3067                         fd_pipes = {}
3068
3069                 fd_pipes.setdefault(0, sys.stdin.fileno())
3070                 fd_pipes.setdefault(1, sys.stdout.fileno())
3071                 fd_pipes.setdefault(2, sys.stderr.fileno())
3072
3073                 # flush any pending output
3074                 for fd in fd_pipes.itervalues():
3075                         if fd == sys.stdout.fileno():
3076                                 sys.stdout.flush()
3077                         if fd == sys.stderr.fileno():
3078                                 sys.stderr.flush()
3079
3080                 fd_pipes_orig = fd_pipes.copy()
3081                 self._files = self._files_dict()
3082                 files = self._files
3083
3084                 master_fd, slave_fd = os.pipe()
3085                 fcntl.fcntl(master_fd, fcntl.F_SETFL,
3086                         fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
3087
3088                 fd_pipes[self._metadata_fd] = slave_fd
3089
3090                 self._raw_metadata = []
3091                 files.ebuild = os.fdopen(master_fd, 'r')
3092                 self._reg_id = self.scheduler.register(files.ebuild.fileno(),
3093                         self._registered_events, self._output_handler)
3094                 self._registered = True
3095
3096                 retval = portage.doebuild(ebuild_path, "depend",
3097                         settings["ROOT"], settings, debug,
3098                         mydbapi=self.portdb, tree="porttree",
3099                         fd_pipes=fd_pipes, returnpid=True)
3100
3101                 os.close(slave_fd)
3102
3103                 if isinstance(retval, int):
3104                         # doebuild failed before spawning
3105                         self._unregister()
3106                         self.returncode = retval
3107                         self.wait()
3108                         return
3109
3110                 self.pid = retval[0]
3111                 portage.process.spawned_pids.remove(self.pid)
3112
3113         def _output_handler(self, fd, event):
3114
3115                 if event & PollConstants.POLLIN:
3116                         self._raw_metadata.append(self._files.ebuild.read())
3117                         if not self._raw_metadata[-1]:
3118                                 self._unregister()
3119                                 self.wait()
3120
3121                 self._unregister_if_appropriate(event)
3122                 return self._registered
3123
3124         def _set_returncode(self, wait_retval):
3125                 SubProcess._set_returncode(self, wait_retval)
3126                 if self.returncode == os.EX_OK:
3127                         metadata_lines = "".join(self._raw_metadata).splitlines()
3128                         if len(portage.auxdbkeys) != len(metadata_lines):
3129                                 # Don't trust bash's returncode if the
3130                                 # number of lines is incorrect.
3131                                 self.returncode = 1
3132                         else:
3133                                 metadata = izip(portage.auxdbkeys, metadata_lines)
3134                                 self.metadata = self.metadata_callback(self.cpv,
3135                                         self.ebuild_path, self.repo_path, metadata,
3136                                         self.ebuild_mtime)
3137
3138 class EbuildProcess(SpawnProcess):
3139
3140         __slots__ = ("phase", "pkg", "settings", "tree")
3141
3142         def _start(self):
3143                 # Don't open the log file during the clean phase since the
3144                 # open file can result in an nfs lock on $T/build.log which
3145                 # prevents the clean phase from removing $T.
3146                 if self.phase not in ("clean", "cleanrm"):
3147                         self.logfile = self.settings.get("PORTAGE_LOG_FILE")
3148                 SpawnProcess._start(self)
3149
3150         def _pipe(self, fd_pipes):
3151                 stdout_pipe = fd_pipes.get(1)
3152                 got_pty, master_fd, slave_fd = \
3153                         portage._create_pty_or_pipe(copy_term_size=stdout_pipe)
3154                 return (master_fd, slave_fd)
3155
3156         def _spawn(self, args, **kwargs):
3157
3158                 root_config = self.pkg.root_config
3159                 tree = self.tree
3160                 mydbapi = root_config.trees[tree].dbapi
3161                 settings = self.settings
3162                 ebuild_path = settings["EBUILD"]
3163                 debug = settings.get("PORTAGE_DEBUG") == "1"
3164
3165                 rval = portage.doebuild(ebuild_path, self.phase,
3166                         root_config.root, settings, debug,
3167                         mydbapi=mydbapi, tree=tree, **kwargs)
3168
3169                 return rval
3170
3171         def _set_returncode(self, wait_retval):
3172                 SpawnProcess._set_returncode(self, wait_retval)
3173
3174                 if self.phase not in ("clean", "cleanrm"):
3175                         self.returncode = portage._doebuild_exit_status_check_and_log(
3176                                 self.settings, self.phase, self.returncode)
3177
3178                 if self.phase == "test" and self.returncode != os.EX_OK and \
3179                         "test-fail-continue" in self.settings.features:
3180                         self.returncode = os.EX_OK
3181
3182                 portage._post_phase_userpriv_perms(self.settings)
3183
3184 class EbuildPhase(CompositeTask):
3185
3186         __slots__ = ("background", "pkg", "phase",
3187                 "scheduler", "settings", "tree")
3188
3189         _post_phase_cmds = portage._post_phase_cmds
3190
3191         def _start(self):
3192
3193                 ebuild_process = EbuildProcess(background=self.background,
3194                         pkg=self.pkg, phase=self.phase, scheduler=self.scheduler,
3195                         settings=self.settings, tree=self.tree)
3196
3197                 self._start_task(ebuild_process, self._ebuild_exit)
3198
3199         def _ebuild_exit(self, ebuild_process):
3200
3201                 if self.phase == "install":
3202                         out = None
3203                         log_path = self.settings.get("PORTAGE_LOG_FILE")
3204                         log_file = None
3205                         if self.background and log_path is not None:
3206                                 log_file = open(log_path, 'a')
3207                                 out = log_file
3208                         try:
3209                                 portage._check_build_log(self.settings, out=out)
3210                         finally:
3211                                 if log_file is not None:
3212                                         log_file.close()
3213
3214                 if self._default_exit(ebuild_process) != os.EX_OK:
3215                         self.wait()
3216                         return
3217
3218                 settings = self.settings
3219
3220                 if self.phase == "install":
3221                         portage._post_src_install_chost_fix(settings)
3222                         portage._post_src_install_uid_fix(settings)
3223
3224                 post_phase_cmds = self._post_phase_cmds.get(self.phase)
3225                 if post_phase_cmds is not None:
3226                         post_phase = MiscFunctionsProcess(background=self.background,
3227                                 commands=post_phase_cmds, phase=self.phase, pkg=self.pkg,
3228                                 scheduler=self.scheduler, settings=settings)
3229                         self._start_task(post_phase, self._post_phase_exit)
3230                         return
3231
3232                 self.returncode = ebuild_process.returncode
3233                 self._current_task = None
3234                 self.wait()
3235
3236         def _post_phase_exit(self, post_phase):
3237                 if self._final_exit(post_phase) != os.EX_OK:
3238                         writemsg("!!! post %s failed; exiting.\n" % self.phase,
3239                                 noiselevel=-1)
3240                 self._current_task = None
3241                 self.wait()
3242                 return
3243
3244 class EbuildBinpkg(EbuildProcess):
3245         """
3246         This assumes that src_install() has successfully completed.
3247         """
3248         __slots__ = ("_binpkg_tmpfile",)
3249
3250         def _start(self):
3251                 self.phase = "package"
3252                 self.tree = "porttree"
3253                 pkg = self.pkg
3254                 root_config = pkg.root_config
3255                 portdb = root_config.trees["porttree"].dbapi
3256                 bintree = root_config.trees["bintree"]
3257                 ebuild_path = portdb.findname(self.pkg.cpv)
3258                 settings = self.settings
3259                 debug = settings.get("PORTAGE_DEBUG") == "1"
3260
3261                 bintree.prevent_collision(pkg.cpv)
3262                 binpkg_tmpfile = os.path.join(bintree.pkgdir,
3263                         pkg.cpv + ".tbz2." + str(os.getpid()))
3264                 self._binpkg_tmpfile = binpkg_tmpfile
3265                 settings["PORTAGE_BINPKG_TMPFILE"] = binpkg_tmpfile
3266                 settings.backup_changes("PORTAGE_BINPKG_TMPFILE")
3267
3268                 try:
3269                         EbuildProcess._start(self)
3270                 finally:
3271                         settings.pop("PORTAGE_BINPKG_TMPFILE", None)
3272
3273         def _set_returncode(self, wait_retval):
3274                 EbuildProcess._set_returncode(self, wait_retval)
3275
3276                 pkg = self.pkg
3277                 bintree = pkg.root_config.trees["bintree"]
3278                 binpkg_tmpfile = self._binpkg_tmpfile
3279                 if self.returncode == os.EX_OK:
3280                         bintree.inject(pkg.cpv, filename=binpkg_tmpfile)
3281
3282 class EbuildMerge(SlotObject):
3283
3284         __slots__ = ("find_blockers", "logger", "ldpath_mtimes",
3285                 "pkg", "pkg_count", "pkg_path", "pretend",
3286                 "scheduler", "settings", "tree", "world_atom")
3287
3288         def execute(self):
3289                 root_config = self.pkg.root_config
3290                 settings = self.settings
3291                 retval = portage.merge(settings["CATEGORY"],
3292                         settings["PF"], settings["D"],
3293                         os.path.join(settings["PORTAGE_BUILDDIR"],
3294                         "build-info"), root_config.root, settings,
3295                         myebuild=settings["EBUILD"],
3296                         mytree=self.tree, mydbapi=root_config.trees[self.tree].dbapi,
3297                         vartree=root_config.trees["vartree"],
3298                         prev_mtimes=self.ldpath_mtimes,
3299                         scheduler=self.scheduler,
3300                         blockers=self.find_blockers)
3301
3302                 if retval == os.EX_OK:
3303                         self.world_atom(self.pkg)
3304                         self._log_success()
3305
3306                 return retval
3307
3308         def _log_success(self):
3309                 pkg = self.pkg
3310                 pkg_count = self.pkg_count
3311                 pkg_path = self.pkg_path
3312                 logger = self.logger
3313                 if "noclean" not in self.settings.features:
3314                         short_msg = "emerge: (%s of %s) %s Clean Post" % \
3315                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3316                         logger.log((" === (%s of %s) " + \
3317                                 "Post-Build Cleaning (%s::%s)") % \
3318                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path),
3319                                 short_msg=short_msg)
3320                 logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
3321                         (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
3322
3323 class PackageUninstall(AsynchronousTask):
3324
3325         __slots__ = ("ldpath_mtimes", "opts", "pkg", "scheduler", "settings")
3326
3327         def _start(self):
3328                 try:
3329                         unmerge(self.pkg.root_config, self.opts, "unmerge",
3330                                 [self.pkg.cpv], self.ldpath_mtimes, clean_world=0,
3331                                 clean_delay=0, raise_on_error=1, scheduler=self.scheduler,
3332                                 writemsg_level=self._writemsg_level)
3333                 except UninstallFailure, e:
3334                         self.returncode = e.status
3335                 else:
3336                         self.returncode = os.EX_OK
3337                 self.wait()
3338
3339         def _writemsg_level(self, msg, level=0, noiselevel=0):
3340
3341                 log_path = self.settings.get("PORTAGE_LOG_FILE")
3342                 background = self.background
3343
3344                 if log_path is None:
3345                         if not (background and level < logging.WARNING):
3346                                 portage.util.writemsg_level(msg,
3347                                         level=level, noiselevel=noiselevel)
3348                 else:
3349                         if not background:
3350                                 portage.util.writemsg_level(msg,
3351                                         level=level, noiselevel=noiselevel)
3352
3353                         f = open(log_path, 'a')
3354                         try:
3355                                 f.write(msg)
3356                         finally:
3357                                 f.close()
3358
3359 class Binpkg(CompositeTask):
3360
3361         __slots__ = ("find_blockers",
3362                 "ldpath_mtimes", "logger", "opts",
3363                 "pkg", "pkg_count", "prefetcher", "settings", "world_atom") + \
3364                 ("_bintree", "_build_dir", "_ebuild_path", "_fetched_pkg",
3365                 "_image_dir", "_infloc", "_pkg_path", "_tree", "_verify")
3366
3367         def _writemsg_level(self, msg, level=0, noiselevel=0):
3368
3369                 if not self.background:
3370                         portage.util.writemsg_level(msg,
3371                                 level=level, noiselevel=noiselevel)
3372
3373                 log_path = self.settings.get("PORTAGE_LOG_FILE")
3374                 if  log_path is not None:
3375                         f = open(log_path, 'a')
3376                         try:
3377                                 f.write(msg)
3378                         finally:
3379                                 f.close()
3380
3381         def _start(self):
3382
3383                 pkg = self.pkg
3384                 settings = self.settings
3385                 settings.setcpv(pkg)
3386                 self._tree = "bintree"
3387                 self._bintree = self.pkg.root_config.trees[self._tree]
3388                 self._verify = not self.opts.pretend
3389
3390                 dir_path = os.path.join(settings["PORTAGE_TMPDIR"],
3391                         "portage", pkg.category, pkg.pf)
3392                 self._build_dir = EbuildBuildDir(dir_path=dir_path,
3393                         pkg=pkg, settings=settings)
3394                 self._image_dir = os.path.join(dir_path, "image")
3395                 self._infloc = os.path.join(dir_path, "build-info")
3396                 self._ebuild_path = os.path.join(self._infloc, pkg.pf + ".ebuild")
3397                 settings["EBUILD"] = self._ebuild_path
3398                 debug = settings.get("PORTAGE_DEBUG") == "1"
3399                 portage.doebuild_environment(self._ebuild_path, "setup",
3400                         settings["ROOT"], settings, debug, 1, self._bintree.dbapi)
3401                 settings.configdict["pkg"]["EMERGE_FROM"] = pkg.type_name
3402
3403                 # The prefetcher has already completed or it
3404                 # could be running now. If it's running now,
3405                 # wait for it to complete since it holds
3406                 # a lock on the file being fetched. The
3407                 # portage.locks functions are only designed
3408                 # to work between separate processes. Since
3409                 # the lock is held by the current process,
3410                 # use the scheduler and fetcher methods to
3411                 # synchronize with the fetcher.
3412                 prefetcher = self.prefetcher
3413                 if prefetcher is None:
3414                         pass
3415                 elif not prefetcher.isAlive():
3416                         prefetcher.cancel()
3417                 elif prefetcher.poll() is None:
3418
3419                         waiting_msg = ("Fetching '%s' " + \
3420                                 "in the background. " + \
3421                                 "To view fetch progress, run `tail -f " + \
3422                                 "/var/log/emerge-fetch.log` in another " + \
3423                                 "terminal.") % prefetcher.pkg_path
3424                         msg_prefix = colorize("GOOD", " * ")
3425                         from textwrap import wrap
3426                         waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
3427                                 for line in wrap(waiting_msg, 65))
3428                         if not self.background:
3429                                 writemsg(waiting_msg, noiselevel=-1)
3430
3431                         self._current_task = prefetcher
3432                         prefetcher.addExitListener(self._prefetch_exit)
3433                         return
3434
3435                 self._prefetch_exit(prefetcher)
3436
3437         def _prefetch_exit(self, prefetcher):
3438
3439                 pkg = self.pkg
3440                 pkg_count = self.pkg_count
3441                 if not (self.opts.pretend or self.opts.fetchonly):
3442                         self._build_dir.lock()
3443                         # If necessary, discard old log so that we don't
3444                         # append to it.
3445                         self._build_dir.clean_log()
3446                         # Initialze PORTAGE_LOG_FILE.
3447                         portage.prepare_build_dirs(self.settings["ROOT"], self.settings, 1)
3448                 fetcher = BinpkgFetcher(background=self.background,
3449                         logfile=self.settings.get("PORTAGE_LOG_FILE"), pkg=self.pkg,
3450                         pretend=self.opts.pretend, scheduler=self.scheduler)
3451                 pkg_path = fetcher.pkg_path
3452                 self._pkg_path = pkg_path
3453
3454                 if self.opts.getbinpkg and self._bintree.isremote(pkg.cpv):
3455
3456                         msg = " --- (%s of %s) Fetching Binary (%s::%s)" %\
3457                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
3458                         short_msg = "emerge: (%s of %s) %s Fetch" % \
3459                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3460                         self.logger.log(msg, short_msg=short_msg)
3461                         self._start_task(fetcher, self._fetcher_exit)
3462                         return
3463
3464                 self._fetcher_exit(fetcher)
3465
3466         def _fetcher_exit(self, fetcher):
3467
3468                 # The fetcher only has a returncode when
3469                 # --getbinpkg is enabled.
3470                 if fetcher.returncode is not None:
3471                         self._fetched_pkg = True
3472                         if self._default_exit(fetcher) != os.EX_OK:
3473                                 self._unlock_builddir()
3474                                 self.wait()
3475                                 return
3476
3477                 if self.opts.pretend:
3478                         self._current_task = None
3479                         self.returncode = os.EX_OK
3480                         self.wait()
3481                         return
3482
3483                 verifier = None
3484                 if self._verify:
3485                         logfile = None
3486                         if self.background:
3487                                 logfile = self.settings.get("PORTAGE_LOG_FILE")
3488                         verifier = BinpkgVerifier(background=self.background,
3489                                 logfile=logfile, pkg=self.pkg)
3490                         self._start_task(verifier, self._verifier_exit)
3491                         return
3492
3493                 self._verifier_exit(verifier)
3494
3495         def _verifier_exit(self, verifier):
3496                 if verifier is not None and \
3497                         self._default_exit(verifier) != os.EX_OK:
3498                         self._unlock_builddir()
3499                         self.wait()
3500                         return
3501
3502                 logger = self.logger
3503                 pkg = self.pkg
3504                 pkg_count = self.pkg_count
3505                 pkg_path = self._pkg_path
3506
3507                 if self._fetched_pkg:
3508                         self._bintree.inject(pkg.cpv, filename=pkg_path)
3509
3510                 if self.opts.fetchonly:
3511                         self._current_task = None
3512                         self.returncode = os.EX_OK
3513                         self.wait()
3514                         return
3515
3516                 msg = " === (%s of %s) Merging Binary (%s::%s)" % \
3517                         (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
3518                 short_msg = "emerge: (%s of %s) %s Merge Binary" % \
3519                         (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3520                 logger.log(msg, short_msg=short_msg)
3521
3522                 phase = "clean"
3523                 settings = self.settings
3524                 ebuild_phase = EbuildPhase(background=self.background,
3525                         pkg=pkg, phase=phase, scheduler=self.scheduler,
3526                         settings=settings, tree=self._tree)
3527
3528                 self._start_task(ebuild_phase, self._clean_exit)
3529
3530         def _clean_exit(self, clean_phase):
3531                 if self._default_exit(clean_phase) != os.EX_OK:
3532                         self._unlock_builddir()
3533                         self.wait()
3534                         return
3535
3536                 dir_path = self._build_dir.dir_path
3537
3538                 infloc = self._infloc
3539                 pkg = self.pkg
3540                 pkg_path = self._pkg_path
3541
3542                 dir_mode = 0755
3543                 for mydir in (dir_path, self._image_dir, infloc):
3544                         portage.util.ensure_dirs(mydir, uid=portage.data.portage_uid,
3545                                 gid=portage.data.portage_gid, mode=dir_mode)
3546
3547                 # This initializes PORTAGE_LOG_FILE.
3548                 portage.prepare_build_dirs(self.settings["ROOT"], self.settings, 1)
3549                 self._writemsg_level(">>> Extracting info\n")
3550
3551                 pkg_xpak = portage.xpak.tbz2(self._pkg_path)
3552                 check_missing_metadata = ("CATEGORY", "PF")
3553                 missing_metadata = set()
3554                 for k in check_missing_metadata:
3555                         v = pkg_xpak.getfile(k)
3556                         if not v:
3557                                 missing_metadata.add(k)
3558
3559                 pkg_xpak.unpackinfo(infloc)
3560                 for k in missing_metadata:
3561                         if k == "CATEGORY":
3562                                 v = pkg.category
3563                         elif k == "PF":
3564                                 v = pkg.pf
3565                         else:
3566                                 continue
3567
3568                         f = open(os.path.join(infloc, k), 'wb')
3569                         try:
3570                                 f.write(v + "\n")
3571                         finally:
3572                                 f.close()
3573
3574                 # Store the md5sum in the vdb.
3575                 f = open(os.path.join(infloc, "BINPKGMD5"), "w")
3576                 try:
3577                         f.write(str(portage.checksum.perform_md5(pkg_path)) + "\n")
3578                 finally:
3579                         f.close()
3580
3581                 # This gives bashrc users an opportunity to do various things
3582                 # such as remove binary packages after they're installed.
3583                 settings = self.settings
3584                 settings.setcpv(self.pkg)
3585                 settings["PORTAGE_BINPKG_FILE"] = pkg_path
3586                 settings.backup_changes("PORTAGE_BINPKG_FILE")
3587
3588                 phase = "setup"
3589                 setup_phase = EbuildPhase(background=self.background,
3590                         pkg=self.pkg, phase=phase, scheduler=self.scheduler,
3591                         settings=settings, tree=self._tree)
3592
3593                 setup_phase.addExitListener(self._setup_exit)
3594                 self._current_task = setup_phase
3595                 self.scheduler.scheduleSetup(setup_phase)
3596
3597         def _setup_exit(self, setup_phase):
3598                 if self._default_exit(setup_phase) != os.EX_OK:
3599                         self._unlock_builddir()
3600                         self.wait()
3601                         return
3602
3603                 extractor = BinpkgExtractorAsync(background=self.background,
3604                         image_dir=self._image_dir,
3605                         pkg=self.pkg, pkg_path=self._pkg_path, scheduler=self.scheduler)
3606                 self._writemsg_level(">>> Extracting %s\n" % self.pkg.cpv)
3607                 self._start_task(extractor, self._extractor_exit)
3608
3609         def _extractor_exit(self, extractor):
3610                 if self._final_exit(extractor) != os.EX_OK:
3611                         self._unlock_builddir()
3612                         writemsg("!!! Error Extracting '%s'\n" % self._pkg_path,
3613                                 noiselevel=-1)
3614                 self.wait()
3615
3616         def _unlock_builddir(self):
3617                 if self.opts.pretend or self.opts.fetchonly:
3618                         return
3619                 portage.elog.elog_process(self.pkg.cpv, self.settings)
3620                 self._build_dir.unlock()
3621
3622         def install(self):
3623
3624                 # This gives bashrc users an opportunity to do various things
3625                 # such as remove binary packages after they're installed.
3626                 settings = self.settings
3627                 settings["PORTAGE_BINPKG_FILE"] = self._pkg_path
3628                 settings.backup_changes("PORTAGE_BINPKG_FILE")
3629
3630                 merge = EbuildMerge(find_blockers=self.find_blockers,
3631                         ldpath_mtimes=self.ldpath_mtimes, logger=self.logger,
3632                         pkg=self.pkg, pkg_count=self.pkg_count,
3633                         pkg_path=self._pkg_path, scheduler=self.scheduler,
3634                         settings=settings, tree=self._tree, world_atom=self.world_atom)
3635
3636                 try:
3637                         retval = merge.execute()
3638                 finally:
3639                         settings.pop("PORTAGE_BINPKG_FILE", None)
3640                         self._unlock_builddir()
3641                 return retval
3642
3643 class BinpkgFetcher(SpawnProcess):
3644
3645         __slots__ = ("pkg", "pretend",
3646                 "locked", "pkg_path", "_lock_obj")
3647
3648         def __init__(self, **kwargs):
3649                 SpawnProcess.__init__(self, **kwargs)
3650                 pkg = self.pkg
3651                 self.pkg_path = pkg.root_config.trees["bintree"].getname(pkg.cpv)
3652
3653         def _start(self):
3654
3655                 if self.cancelled:
3656                         return
3657
3658                 pkg = self.pkg
3659                 pretend = self.pretend
3660                 bintree = pkg.root_config.trees["bintree"]
3661                 settings = bintree.settings
3662                 use_locks = "distlocks" in settings.features
3663                 pkg_path = self.pkg_path
3664
3665                 if not pretend:
3666                         portage.util.ensure_dirs(os.path.dirname(pkg_path))
3667                         if use_locks:
3668                                 self.lock()
3669                 exists = os.path.exists(pkg_path)
3670                 resume = exists and os.path.basename(pkg_path) in bintree.invalids
3671                 if not (pretend or resume):
3672                         # Remove existing file or broken symlink.
3673                         try:
3674                                 os.unlink(pkg_path)
3675                         except OSError:
3676                                 pass
3677
3678                 # urljoin doesn't work correctly with
3679                 # unrecognized protocols like sftp
3680                 if bintree._remote_has_index:
3681                         rel_uri = bintree._remotepkgs[pkg.cpv].get("PATH")
3682                         if not rel_uri:
3683                                 rel_uri = pkg.cpv + ".tbz2"
3684                         uri = bintree._remote_base_uri.rstrip("/") + \
3685                                 "/" + rel_uri.lstrip("/")
3686                 else:
3687                         uri = settings["PORTAGE_BINHOST"].rstrip("/") + \
3688                                 "/" + pkg.pf + ".tbz2"
3689
3690                 if pretend:
3691                         portage.writemsg_stdout("\n%s\n" % uri, noiselevel=-1)
3692                         self.returncode = os.EX_OK
3693                         self.wait()
3694                         return
3695
3696                 protocol = urlparse.urlparse(uri)[0]
3697                 fcmd_prefix = "FETCHCOMMAND"
3698                 if resume:
3699                         fcmd_prefix = "RESUMECOMMAND"
3700                 fcmd = settings.get(fcmd_prefix + "_" + protocol.upper())
3701                 if not fcmd:
3702                         fcmd = settings.get(fcmd_prefix)
3703
3704                 fcmd_vars = {
3705                         "DISTDIR" : os.path.dirname(pkg_path),
3706                         "URI"     : uri,
3707                         "FILE"    : os.path.basename(pkg_path)
3708                 }
3709
3710                 fetch_env = dict(settings.iteritems())
3711                 fetch_args = [portage.util.varexpand(x, mydict=fcmd_vars) \
3712                         for x in shlex.split(fcmd)]
3713
3714                 if self.fd_pipes is None:
3715                         self.fd_pipes = {}
3716                 fd_pipes = self.fd_pipes
3717
3718                 # Redirect all output to stdout since some fetchers like
3719                 # wget pollute stderr (if portage detects a problem then it
3720                 # can send it's own message to stderr).
3721                 fd_pipes.setdefault(0, sys.stdin.fileno())
3722                 fd_pipes.setdefault(1, sys.stdout.fileno())
3723                 fd_pipes.setdefault(2, sys.stdout.fileno())
3724
3725                 self.args = fetch_args
3726                 self.env = fetch_env
3727                 SpawnProcess._start(self)
3728
3729         def _set_returncode(self, wait_retval):
3730                 SpawnProcess._set_returncode(self, wait_retval)
3731                 if self.returncode == os.EX_OK:
3732                         # If possible, update the mtime to match the remote package if
3733                         # the fetcher didn't already do it automatically.
3734                         bintree = self.pkg.root_config.trees["bintree"]
3735                         if bintree._remote_has_index:
3736                                 remote_mtime = bintree._remotepkgs[self.pkg.cpv].get("MTIME")
3737                                 if remote_mtime is not None:
3738                                         try:
3739                                                 remote_mtime = long(remote_mtime)
3740                                         except ValueError:
3741                                                 pass
3742                                         else:
3743                                                 try:
3744                                                         local_mtime = long(os.stat(self.pkg_path).st_mtime)
3745                                                 except OSError:
3746                                                         pass
3747                                                 else:
3748                                                         if remote_mtime != local_mtime:
3749                                                                 try:
3750                                                                         os.utime(self.pkg_path,
3751                                                                                 (remote_mtime, remote_mtime))
3752                                                                 except OSError:
3753                                                                         pass
3754
3755                 if self.locked:
3756                         self.unlock()
3757
3758         def lock(self):
3759                 """
3760                 This raises an AlreadyLocked exception if lock() is called
3761                 while a lock is already held. In order to avoid this, call
3762                 unlock() or check whether the "locked" attribute is True
3763                 or False before calling lock().
3764                 """
3765                 if self._lock_obj is not None:
3766                         raise self.AlreadyLocked((self._lock_obj,))
3767
3768                 self._lock_obj = portage.locks.lockfile(
3769                         self.pkg_path, wantnewlockfile=1)
3770                 self.locked = True
3771
3772         class AlreadyLocked(portage.exception.PortageException):
3773                 pass
3774
3775         def unlock(self):
3776                 if self._lock_obj is None:
3777                         return
3778                 portage.locks.unlockfile(self._lock_obj)
3779                 self._lock_obj = None
3780                 self.locked = False
3781
3782 class BinpkgVerifier(AsynchronousTask):
3783         __slots__ = ("logfile", "pkg",)
3784
3785         def _start(self):
3786                 """
3787                 Note: Unlike a normal AsynchronousTask.start() method,
3788                 this one does all work is synchronously. The returncode
3789                 attribute will be set before it returns.
3790                 """
3791
3792                 pkg = self.pkg
3793                 root_config = pkg.root_config
3794                 bintree = root_config.trees["bintree"]
3795                 rval = os.EX_OK
3796                 stdout_orig = sys.stdout
3797                 stderr_orig = sys.stderr
3798                 log_file = None
3799                 if self.background and self.logfile is not None:
3800                         log_file = open(self.logfile, 'a')
3801                 try:
3802                         if log_file is not None:
3803                                 sys.stdout = log_file
3804                                 sys.stderr = log_file
3805                         try:
3806                                 bintree.digestCheck(pkg)
3807                         except portage.exception.FileNotFound:
3808                                 writemsg("!!! Fetching Binary failed " + \
3809                                         "for '%s'\n" % pkg.cpv, noiselevel=-1)
3810                                 rval = 1
3811                         except portage.exception.DigestException, e:
3812                                 writemsg("\n!!! Digest verification failed:\n",
3813                                         noiselevel=-1)
3814                                 writemsg("!!! %s\n" % e.value[0],
3815                                         noiselevel=-1)
3816                                 writemsg("!!! Reason: %s\n" % e.value[1],
3817                                         noiselevel=-1)
3818                                 writemsg("!!! Got: %s\n" % e.value[2],
3819                                         noiselevel=-1)
3820                                 writemsg("!!! Expected: %s\n" % e.value[3],
3821                                         noiselevel=-1)
3822                                 rval = 1
3823                         if rval != os.EX_OK:
3824                                 pkg_path = bintree.getname(pkg.cpv)
3825                                 head, tail = os.path.split(pkg_path)
3826                                 temp_filename = portage._checksum_failure_temp_file(head, tail)
3827                                 writemsg("File renamed to '%s'\n" % (temp_filename,),
3828                                         noiselevel=-1)
3829                 finally:
3830                         sys.stdout = stdout_orig
3831                         sys.stderr = stderr_orig
3832                         if log_file is not None:
3833                                 log_file.close()
3834
3835                 self.returncode = rval
3836                 self.wait()
3837
3838 class BinpkgPrefetcher(CompositeTask):
3839
3840         __slots__ = ("pkg",) + \
3841                 ("pkg_path", "_bintree",)
3842
3843         def _start(self):
3844                 self._bintree = self.pkg.root_config.trees["bintree"]
3845                 fetcher = BinpkgFetcher(background=self.background,
3846                         logfile=self.scheduler.fetch.log_file, pkg=self.pkg,
3847                         scheduler=self.scheduler)
3848                 self.pkg_path = fetcher.pkg_path
3849                 self._start_task(fetcher, self._fetcher_exit)
3850
3851         def _fetcher_exit(self, fetcher):
3852
3853                 if self._default_exit(fetcher) != os.EX_OK:
3854                         self.wait()
3855                         return
3856
3857                 verifier = BinpkgVerifier(background=self.background,
3858                         logfile=self.scheduler.fetch.log_file, pkg=self.pkg)
3859                 self._start_task(verifier, self._verifier_exit)
3860
3861         def _verifier_exit(self, verifier):
3862                 if self._default_exit(verifier) != os.EX_OK:
3863                         self.wait()
3864                         return
3865
3866                 self._bintree.inject(self.pkg.cpv, filename=self.pkg_path)
3867
3868                 self._current_task = None
3869                 self.returncode = os.EX_OK
3870                 self.wait()
3871
3872 class BinpkgExtractorAsync(SpawnProcess):
3873
3874         __slots__ = ("image_dir", "pkg", "pkg_path")
3875
3876         _shell_binary = portage.const.BASH_BINARY
3877
3878         def _start(self):
3879                 self.args = [self._shell_binary, "-c",
3880                         "bzip2 -dqc -- %s | tar -xp -C %s -f -" % \
3881                         (portage._shell_quote(self.pkg_path),
3882                         portage._shell_quote(self.image_dir))]
3883
3884                 self.env = self.pkg.root_config.settings.environ()
3885                 SpawnProcess._start(self)
3886
3887 class MergeListItem(CompositeTask):
3888
3889         """
3890         TODO: For parallel scheduling, everything here needs asynchronous
3891         execution support (start, poll, and wait methods).
3892         """
3893
3894         __slots__ = ("args_set",
3895                 "binpkg_opts", "build_opts", "config_pool", "emerge_opts",
3896                 "find_blockers", "logger", "mtimedb", "pkg",
3897                 "pkg_count", "pkg_to_replace", "prefetcher",
3898                 "settings", "statusMessage", "world_atom") + \
3899                 ("_install_task",)
3900
3901         def _start(self):
3902
3903                 pkg = self.pkg
3904                 build_opts = self.build_opts
3905
3906                 if pkg.installed:
3907                         # uninstall,  executed by self.merge()
3908                         self.returncode = os.EX_OK
3909                         self.wait()
3910                         return
3911
3912                 args_set = self.args_set
3913                 find_blockers = self.find_blockers
3914                 logger = self.logger
3915                 mtimedb = self.mtimedb
3916                 pkg_count = self.pkg_count
3917                 scheduler = self.scheduler
3918                 settings = self.settings
3919                 world_atom = self.world_atom
3920                 ldpath_mtimes = mtimedb["ldpath"]
3921
3922                 action_desc = "Emerging"
3923                 preposition = "for"
3924                 if pkg.type_name == "binary":
3925                         action_desc += " binary"
3926
3927                 if build_opts.fetchonly:
3928                         action_desc = "Fetching"
3929
3930                 msg = "%s (%s of %s) %s" % \
3931                         (action_desc,
3932                         colorize("MERGE_LIST_PROGRESS", str(pkg_count.curval)),
3933                         colorize("MERGE_LIST_PROGRESS", str(pkg_count.maxval)),
3934                         colorize("GOOD", pkg.cpv))
3935
3936                 portdb = pkg.root_config.trees["porttree"].dbapi
3937                 portdir_repo_name = portdb._repository_map.get(portdb.porttree_root)
3938                 if portdir_repo_name:
3939                         pkg_repo_name = pkg.metadata.get("repository")
3940                         if pkg_repo_name != portdir_repo_name:
3941                                 if not pkg_repo_name:
3942                                         pkg_repo_name = "unknown repo"
3943                                 msg += " from %s" % pkg_repo_name
3944
3945                 if pkg.root != "/":
3946                         msg += " %s %s" % (preposition, pkg.root)
3947
3948                 if not build_opts.pretend:
3949                         self.statusMessage(msg)
3950                         logger.log(" >>> emerge (%s of %s) %s to %s" % \
3951                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
3952
3953                 if pkg.type_name == "ebuild":
3954
3955                         build = EbuildBuild(args_set=args_set,
3956                                 background=self.background,
3957                                 config_pool=self.config_pool,
3958                                 find_blockers=find_blockers,
3959                                 ldpath_mtimes=ldpath_mtimes, logger=logger,
3960                                 opts=build_opts, pkg=pkg, pkg_count=pkg_count,
3961                                 prefetcher=self.prefetcher, scheduler=scheduler,
3962                                 settings=settings, world_atom=world_atom)
3963
3964                         self._install_task = build
3965                         self._start_task(build, self._default_final_exit)
3966                         return
3967
3968                 elif pkg.type_name == "binary":
3969
3970                         binpkg = Binpkg(background=self.background,
3971                                 find_blockers=find_blockers,
3972                                 ldpath_mtimes=ldpath_mtimes, logger=logger,
3973                                 opts=self.binpkg_opts, pkg=pkg, pkg_count=pkg_count,
3974                                 prefetcher=self.prefetcher, settings=settings,
3975                                 scheduler=scheduler, world_atom=world_atom)
3976
3977                         self._install_task = binpkg
3978                         self._start_task(binpkg, self._default_final_exit)
3979                         return
3980
3981         def _poll(self):
3982                 self._install_task.poll()
3983                 return self.returncode
3984
3985         def _wait(self):
3986                 self._install_task.wait()
3987                 return self.returncode
3988
3989         def merge(self):
3990
3991                 pkg = self.pkg
3992                 build_opts = self.build_opts
3993                 find_blockers = self.find_blockers
3994                 logger = self.logger
3995                 mtimedb = self.mtimedb
3996                 pkg_count = self.pkg_count
3997                 prefetcher = self.prefetcher
3998                 scheduler = self.scheduler
3999                 settings = self.settings
4000                 world_atom = self.world_atom
4001                 ldpath_mtimes = mtimedb["ldpath"]
4002
4003                 if pkg.installed:
4004                         if not (build_opts.buildpkgonly or \
4005                                 build_opts.fetchonly or build_opts.pretend):
4006
4007                                 uninstall = PackageUninstall(background=self.background,
4008                                         ldpath_mtimes=ldpath_mtimes, opts=self.emerge_opts,
4009                                         pkg=pkg, scheduler=scheduler, settings=settings)
4010
4011                                 uninstall.start()
4012                                 retval = uninstall.wait()
4013                                 if retval != os.EX_OK:
4014                                         return retval
4015                         return os.EX_OK
4016
4017                 if build_opts.fetchonly or \
4018                         build_opts.buildpkgonly:
4019                         return self.returncode
4020
4021                 retval = self._install_task.install()
4022                 return retval
4023
4024 class PackageMerge(AsynchronousTask):
4025         """
4026         TODO: Implement asynchronous merge so that the scheduler can
4027         run while a merge is executing.
4028         """
4029
4030         __slots__ = ("merge",)
4031
4032         def _start(self):
4033
4034                 pkg = self.merge.pkg
4035                 pkg_count = self.merge.pkg_count
4036
4037                 if pkg.installed:
4038                         action_desc = "Uninstalling"
4039                         preposition = "from"
4040                         counter_str = ""
4041                 else:
4042                         action_desc = "Installing"
4043                         preposition = "to"
4044                         counter_str = "(%s of %s) " % \
4045                                 (colorize("MERGE_LIST_PROGRESS", str(pkg_count.curval)),
4046                                 colorize("MERGE_LIST_PROGRESS", str(pkg_count.maxval)))
4047
4048                 msg = "%s %s%s" % \
4049                         (action_desc,
4050                         counter_str,
4051                         colorize("GOOD", pkg.cpv))
4052
4053                 if pkg.root != "/":
4054                         msg += " %s %s" % (preposition, pkg.root)
4055
4056                 if not self.merge.build_opts.fetchonly and \
4057                         not self.merge.build_opts.pretend and \
4058                         not self.merge.build_opts.buildpkgonly:
4059                         self.merge.statusMessage(msg)
4060
4061                 self.returncode = self.merge.merge()
4062                 self.wait()
4063
4064 class DependencyArg(object):
4065         def __init__(self, arg=None, root_config=None):
4066                 self.arg = arg
4067                 self.root_config = root_config
4068
4069         def __str__(self):
4070                 return str(self.arg)
4071
4072 class AtomArg(DependencyArg):
4073         def __init__(self, atom=None, **kwargs):
4074                 DependencyArg.__init__(self, **kwargs)
4075                 self.atom = atom
4076                 if not isinstance(self.atom, portage.dep.Atom):
4077                         self.atom = portage.dep.Atom(self.atom)
4078                 self.set = (self.atom, )
4079
4080 class PackageArg(DependencyArg):
4081         def __init__(self, package=None, **kwargs):
4082                 DependencyArg.__init__(self, **kwargs)
4083                 self.package = package
4084                 self.atom = portage.dep.Atom("=" + package.cpv)
4085                 self.set = (self.atom, )
4086
4087 class SetArg(DependencyArg):
4088         def __init__(self, set=None, **kwargs):
4089                 DependencyArg.__init__(self, **kwargs)
4090                 self.set = set
4091                 self.name = self.arg[len(SETPREFIX):]
4092
4093 class Dependency(SlotObject):
4094         __slots__ = ("atom", "blocker", "depth",
4095                 "parent", "onlydeps", "priority", "root")
4096         def __init__(self, **kwargs):
4097                 SlotObject.__init__(self, **kwargs)
4098                 if self.priority is None:
4099                         self.priority = DepPriority()
4100                 if self.depth is None:
4101                         self.depth = 0
4102
4103 class BlockerCache(portage.cache.mappings.MutableMapping):
4104         """This caches blockers of installed packages so that dep_check does not
4105         have to be done for every single installed package on every invocation of
4106         emerge.  The cache is invalidated whenever it is detected that something
4107         has changed that might alter the results of dep_check() calls:
4108                 1) the set of installed packages (including COUNTER) has changed
4109                 2) the old-style virtuals have changed
4110         """
4111
4112         # Number of uncached packages to trigger cache update, since
4113         # it's wasteful to update it for every vdb change.
4114         _cache_threshold = 5
4115
4116         class BlockerData(object):
4117
4118                 __slots__ = ("__weakref__", "atoms", "counter")
4119
4120                 def __init__(self, counter, atoms):
4121                         self.counter = counter
4122                         self.atoms = atoms
4123
4124         def __init__(self, myroot, vardb):
4125                 self._vardb = vardb
4126                 self._virtuals = vardb.settings.getvirtuals()
4127                 self._cache_filename = os.path.join(myroot,
4128                         portage.CACHE_PATH.lstrip(os.path.sep), "vdb_blockers.pickle")
4129                 self._cache_version = "1"
4130                 self._cache_data = None
4131                 self._modified = set()
4132                 self._load()
4133
4134         def _load(self):
4135                 try:
4136                         f = open(self._cache_filename, mode='rb')
4137                         mypickle = pickle.Unpickler(f)
4138                         try:
4139                                 mypickle.find_global = None
4140                         except AttributeError:
4141                                 # TODO: If py3k, override Unpickler.find_class().
4142                                 pass
4143                         self._cache_data = mypickle.load()
4144                         f.close()
4145                         del f
4146                 except (IOError, OSError, EOFError, ValueError, pickle.UnpicklingError), e:
4147                         if isinstance(e, pickle.UnpicklingError):
4148                                 writemsg("!!! Error loading '%s': %s\n" % \
4149                                         (self._cache_filename, str(e)), noiselevel=-1)
4150                         del e
4151
4152                 cache_valid = self._cache_data and \
4153                         isinstance(self._cache_data, dict) and \
4154                         self._cache_data.get("version") == self._cache_version and \
4155                         isinstance(self._cache_data.get("blockers"), dict)
4156                 if cache_valid:
4157                         # Validate all the atoms and counters so that
4158                         # corruption is detected as soon as possible.
4159                         invalid_items = set()
4160                         for k, v in self._cache_data["blockers"].iteritems():
4161                                 if not isinstance(k, basestring):
4162                                         invalid_items.add(k)
4163                                         continue
4164                                 try:
4165                                         if portage.catpkgsplit(k) is None:
4166                                                 invalid_items.add(k)
4167                                                 continue
4168                                 except portage.exception.InvalidData:
4169                                         invalid_items.add(k)
4170                                         continue
4171                                 if not isinstance(v, tuple) or \
4172                                         len(v) != 2:
4173                                         invalid_items.add(k)
4174                                         continue
4175                                 counter, atoms = v
4176                                 if not isinstance(counter, (int, long)):
4177                                         invalid_items.add(k)
4178                                         continue
4179                                 if not isinstance(atoms, (list, tuple)):
4180                                         invalid_items.add(k)
4181                                         continue
4182                                 invalid_atom = False
4183                                 for atom in atoms:
4184                                         if not isinstance(atom, basestring):
4185                                                 invalid_atom = True
4186                                                 break
4187                                         if atom[:1] != "!" or \
4188                                                 not portage.isvalidatom(
4189                                                 atom, allow_blockers=True):
4190                                                 invalid_atom = True
4191                                                 break
4192                                 if invalid_atom:
4193                                         invalid_items.add(k)
4194                                         continue
4195
4196                         for k in invalid_items:
4197                                 del self._cache_data["blockers"][k]
4198                         if not self._cache_data["blockers"]:
4199                                 cache_valid = False
4200
4201                 if not cache_valid:
4202                         self._cache_data = {"version":self._cache_version}
4203                         self._cache_data["blockers"] = {}
4204                         self._cache_data["virtuals"] = self._virtuals
4205                 self._modified.clear()
4206
4207         def flush(self):
4208                 """If the current user has permission and the internal blocker cache
4209                 been updated, save it to disk and mark it unmodified.  This is called
4210                 by emerge after it has proccessed blockers for all installed packages.
4211                 Currently, the cache is only written if the user has superuser
4212                 privileges (since that's required to obtain a lock), but all users
4213                 have read access and benefit from faster blocker lookups (as long as
4214                 the entire cache is still valid).  The cache is stored as a pickled
4215                 dict object with the following format:
4216
4217                 {
4218                         version : "1",
4219                         "blockers" : {cpv1:(counter,(atom1, atom2...)), cpv2...},
4220                         "virtuals" : vardb.settings.getvirtuals()
4221                 }
4222                 """
4223                 if len(self._modified) >= self._cache_threshold and \
4224                         secpass >= 2:
4225                         try:
4226                                 f = portage.util.atomic_ofstream(self._cache_filename, mode='wb')
4227                                 pickle.dump(self._cache_data, f, protocol=2)
4228                                 f.close()
4229                                 portage.util.apply_secpass_permissions(
4230                                         self._cache_filename, gid=portage.portage_gid, mode=0644)
4231                         except (IOError, OSError), e:
4232                                 pass
4233                         self._modified.clear()
4234
4235         def __setitem__(self, cpv, blocker_data):
4236                 """
4237                 Update the cache and mark it as modified for a future call to
4238                 self.flush().
4239
4240                 @param cpv: Package for which to cache blockers.
4241                 @type cpv: String
4242                 @param blocker_data: An object with counter and atoms attributes.
4243                 @type blocker_data: BlockerData
4244                 """
4245                 self._cache_data["blockers"][cpv] = \
4246                         (blocker_data.counter, tuple(str(x) for x in blocker_data.atoms))
4247                 self._modified.add(cpv)
4248
4249         def __iter__(self):
4250                 if self._cache_data is None:
4251                         # triggered by python-trace
4252                         return iter([])
4253                 return iter(self._cache_data["blockers"])
4254
4255         def __delitem__(self, cpv):
4256                 del self._cache_data["blockers"][cpv]
4257
4258         def __getitem__(self, cpv):
4259                 """
4260                 @rtype: BlockerData
4261                 @returns: An object with counter and atoms attributes.
4262                 """
4263                 return self.BlockerData(*self._cache_data["blockers"][cpv])
4264
4265 class BlockerDB(object):
4266
4267         def __init__(self, root_config):
4268                 self._root_config = root_config
4269                 self._vartree = root_config.trees["vartree"]
4270                 self._portdb = root_config.trees["porttree"].dbapi
4271
4272                 self._dep_check_trees = None
4273                 self._fake_vartree = None
4274
4275         def _get_fake_vartree(self, acquire_lock=0):
4276                 fake_vartree = self._fake_vartree
4277                 if fake_vartree is None:
4278                         fake_vartree = FakeVartree(self._root_config,
4279                                 acquire_lock=acquire_lock)
4280                         self._fake_vartree = fake_vartree
4281                         self._dep_check_trees = { self._vartree.root : {
4282                                 "porttree"    :  fake_vartree,
4283                                 "vartree"     :  fake_vartree,
4284                         }}
4285                 else:
4286                         fake_vartree.sync(acquire_lock=acquire_lock)
4287                 return fake_vartree
4288
4289         def findInstalledBlockers(self, new_pkg, acquire_lock=0):
4290                 blocker_cache = BlockerCache(self._vartree.root, self._vartree.dbapi)
4291                 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
4292                 settings = self._vartree.settings
4293                 stale_cache = set(blocker_cache)
4294                 fake_vartree = self._get_fake_vartree(acquire_lock=acquire_lock)
4295                 dep_check_trees = self._dep_check_trees
4296                 vardb = fake_vartree.dbapi
4297                 installed_pkgs = list(vardb)
4298
4299                 for inst_pkg in installed_pkgs:
4300                         stale_cache.discard(inst_pkg.cpv)
4301                         cached_blockers = blocker_cache.get(inst_pkg.cpv)
4302                         if cached_blockers is not None and \
4303                                 cached_blockers.counter != long(inst_pkg.metadata["COUNTER"]):
4304                                 cached_blockers = None
4305                         if cached_blockers is not None:
4306                                 blocker_atoms = cached_blockers.atoms
4307                         else:
4308                                 # Use aux_get() to trigger FakeVartree global
4309                                 # updates on *DEPEND when appropriate.
4310                                 depstr = " ".join(vardb.aux_get(inst_pkg.cpv, dep_keys))
4311                                 try:
4312                                         portage.dep._dep_check_strict = False
4313                                         success, atoms = portage.dep_check(depstr,
4314                                                 vardb, settings, myuse=inst_pkg.use.enabled,
4315                                                 trees=dep_check_trees, myroot=inst_pkg.root)
4316                                 finally:
4317                                         portage.dep._dep_check_strict = True
4318                                 if not success:
4319                                         pkg_location = os.path.join(inst_pkg.root,
4320                                                 portage.VDB_PATH, inst_pkg.category, inst_pkg.pf)
4321                                         portage.writemsg("!!! %s/*DEPEND: %s\n" % \
4322                                                 (pkg_location, atoms), noiselevel=-1)
4323                                         continue
4324
4325                                 blocker_atoms = [atom for atom in atoms \
4326                                         if atom.startswith("!")]
4327                                 blocker_atoms.sort()
4328                                 counter = long(inst_pkg.metadata["COUNTER"])
4329                                 blocker_cache[inst_pkg.cpv] = \
4330                                         blocker_cache.BlockerData(counter, blocker_atoms)
4331                 for cpv in stale_cache:
4332                         del blocker_cache[cpv]
4333                 blocker_cache.flush()
4334
4335                 blocker_parents = digraph()
4336                 blocker_atoms = []
4337                 for pkg in installed_pkgs:
4338                         for blocker_atom in blocker_cache[pkg.cpv].atoms:
4339                                 blocker_atom = blocker_atom.lstrip("!")
4340                                 blocker_atoms.append(blocker_atom)
4341                                 blocker_parents.add(blocker_atom, pkg)
4342
4343                 blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
4344                 blocking_pkgs = set()
4345                 for atom in blocker_atoms.iterAtomsForPackage(new_pkg):
4346                         blocking_pkgs.update(blocker_parents.parent_nodes(atom))
4347
4348                 # Check for blockers in the other direction.
4349                 depstr = " ".join(new_pkg.metadata[k] for k in dep_keys)
4350                 try:
4351                         portage.dep._dep_check_strict = False
4352                         success, atoms = portage.dep_check(depstr,
4353                                 vardb, settings, myuse=new_pkg.use.enabled,
4354                                 trees=dep_check_trees, myroot=new_pkg.root)
4355                 finally:
4356                         portage.dep._dep_check_strict = True
4357                 if not success:
4358                         # We should never get this far with invalid deps.
4359                         show_invalid_depstring_notice(new_pkg, depstr, atoms)
4360                         assert False
4361
4362                 blocker_atoms = [atom.lstrip("!") for atom in atoms \
4363                         if atom[:1] == "!"]
4364                 if blocker_atoms:
4365                         blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
4366                         for inst_pkg in installed_pkgs:
4367                                 try:
4368                                         blocker_atoms.iterAtomsForPackage(inst_pkg).next()
4369                                 except (portage.exception.InvalidDependString, StopIteration):
4370                                         continue
4371                                 blocking_pkgs.add(inst_pkg)
4372
4373                 return blocking_pkgs
4374
4375 def show_invalid_depstring_notice(parent_node, depstring, error_msg):
4376
4377         msg1 = "\n\n!!! Invalid or corrupt dependency specification: " + \
4378                 "\n\n%s\n\n%s\n\n%s\n\n" % (error_msg, parent_node, depstring)
4379         p_type, p_root, p_key, p_status = parent_node
4380         msg = []
4381         if p_status == "nomerge":
4382                 category, pf = portage.catsplit(p_key)
4383                 pkg_location = os.path.join(p_root, portage.VDB_PATH, category, pf)
4384                 msg.append("Portage is unable to process the dependencies of the ")
4385                 msg.append("'%s' package. " % p_key)
4386                 msg.append("In order to correct this problem, the package ")
4387                 msg.append("should be uninstalled, reinstalled, or upgraded. ")
4388                 msg.append("As a temporary workaround, the --nodeps option can ")
4389                 msg.append("be used to ignore all dependencies.  For reference, ")
4390                 msg.append("the problematic dependencies can be found in the ")
4391                 msg.append("*DEPEND files located in '%s/'." % pkg_location)
4392         else:
4393                 msg.append("This package can not be installed. ")
4394                 msg.append("Please notify the '%s' package maintainer " % p_key)
4395                 msg.append("about this problem.")
4396
4397         msg2 = "".join("%s\n" % line for line in textwrap.wrap("".join(msg), 72))
4398         writemsg_level(msg1 + msg2, level=logging.ERROR, noiselevel=-1)
4399
4400 class PackageVirtualDbapi(portage.dbapi):
4401         """
4402         A dbapi-like interface class that represents the state of the installed
4403         package database as new packages are installed, replacing any packages
4404         that previously existed in the same slot. The main difference between
4405         this class and fakedbapi is that this one uses Package instances
4406         internally (passed in via cpv_inject() and cpv_remove() calls).
4407         """
4408         def __init__(self, settings):
4409                 portage.dbapi.__init__(self)
4410                 self.settings = settings
4411                 self._match_cache = {}
4412                 self._cp_map = {}
4413                 self._cpv_map = {}
4414
4415         def clear(self):
4416                 """
4417                 Remove all packages.
4418                 """
4419                 if self._cpv_map:
4420                         self._clear_cache()
4421                         self._cp_map.clear()
4422                         self._cpv_map.clear()
4423
4424         def copy(self):
4425                 obj = PackageVirtualDbapi(self.settings)
4426                 obj._match_cache = self._match_cache.copy()
4427                 obj._cp_map = self._cp_map.copy()
4428                 for k, v in obj._cp_map.iteritems():
4429                         obj._cp_map[k] = v[:]
4430                 obj._cpv_map = self._cpv_map.copy()
4431                 return obj
4432
4433         def __iter__(self):
4434                 return self._cpv_map.itervalues()
4435
4436         def __contains__(self, item):
4437                 existing = self._cpv_map.get(item.cpv)
4438                 if existing is not None and \
4439                         existing == item:
4440                         return True
4441                 return False
4442
4443         def get(self, item, default=None):
4444                 cpv = getattr(item, "cpv", None)
4445                 if cpv is None:
4446                         if len(item) != 4:
4447                                 return default
4448                         type_name, root, cpv, operation = item
4449
4450                 existing = self._cpv_map.get(cpv)
4451                 if existing is not None and \
4452                         existing == item:
4453                         return existing
4454                 return default
4455
4456         def match_pkgs(self, atom):
4457                 return [self._cpv_map[cpv] for cpv in self.match(atom)]
4458
4459         def _clear_cache(self):
4460                 if self._categories is not None:
4461                         self._categories = None
4462                 if self._match_cache:
4463                         self._match_cache = {}
4464
4465         def match(self, origdep, use_cache=1):
4466                 result = self._match_cache.get(origdep)
4467                 if result is not None:
4468                         return result[:]
4469                 result = portage.dbapi.match(self, origdep, use_cache=use_cache)
4470                 self._match_cache[origdep] = result
4471                 return result[:]
4472
4473         def cpv_exists(self, cpv):
4474                 return cpv in self._cpv_map
4475
4476         def cp_list(self, mycp, use_cache=1):
4477                 cachelist = self._match_cache.get(mycp)
4478                 # cp_list() doesn't expand old-style virtuals
4479                 if cachelist and cachelist[0].startswith(mycp):
4480                         return cachelist[:]
4481                 cpv_list = self._cp_map.get(mycp)
4482                 if cpv_list is None:
4483                         cpv_list = []
4484                 else:
4485                         cpv_list = [pkg.cpv for pkg in cpv_list]
4486                 self._cpv_sort_ascending(cpv_list)
4487                 if not (not cpv_list and mycp.startswith("virtual/")):
4488                         self._match_cache[mycp] = cpv_list
4489                 return cpv_list[:]
4490
4491         def cp_all(self):
4492                 return list(self._cp_map)
4493
4494         def cpv_all(self):
4495                 return list(self._cpv_map)
4496
4497         def cpv_inject(self, pkg):
4498                 cp_list = self._cp_map.get(pkg.cp)
4499                 if cp_list is None:
4500                         cp_list = []
4501                         self._cp_map[pkg.cp] = cp_list
4502                 e_pkg = self._cpv_map.get(pkg.cpv)
4503                 if e_pkg is not None:
4504                         if e_pkg == pkg:
4505                                 return
4506                         self.cpv_remove(e_pkg)
4507                 for e_pkg in cp_list:
4508                         if e_pkg.slot_atom == pkg.slot_atom:
4509                                 if e_pkg == pkg:
4510                                         return
4511                                 self.cpv_remove(e_pkg)
4512                                 break
4513                 cp_list.append(pkg)
4514                 self._cpv_map[pkg.cpv] = pkg
4515                 self._clear_cache()
4516
4517         def cpv_remove(self, pkg):
4518                 old_pkg = self._cpv_map.get(pkg.cpv)
4519                 if old_pkg != pkg:
4520                         raise KeyError(pkg)
4521                 self._cp_map[pkg.cp].remove(pkg)
4522                 del self._cpv_map[pkg.cpv]
4523                 self._clear_cache()
4524
4525         def aux_get(self, cpv, wants):
4526                 metadata = self._cpv_map[cpv].metadata
4527                 return [metadata.get(x, "") for x in wants]
4528
4529         def aux_update(self, cpv, values):
4530                 self._cpv_map[cpv].metadata.update(values)
4531                 self._clear_cache()
4532
4533 class depgraph(object):
4534
4535         pkg_tree_map = RootConfig.pkg_tree_map
4536
4537         _dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
4538
4539         def __init__(self, settings, trees, myopts, myparams, spinner):
4540                 self.settings = settings
4541                 self.target_root = settings["ROOT"]
4542                 self.myopts = myopts
4543                 self.myparams = myparams
4544                 self.edebug = 0
4545                 if settings.get("PORTAGE_DEBUG", "") == "1":
4546                         self.edebug = 1
4547                 self.spinner = spinner
4548                 self._running_root = trees["/"]["root_config"]
4549                 self._opts_no_restart = Scheduler._opts_no_restart
4550                 self.pkgsettings = {}
4551                 # Maps slot atom to package for each Package added to the graph.
4552                 self._slot_pkg_map = {}
4553                 # Maps nodes to the reasons they were selected for reinstallation.
4554                 self._reinstall_nodes = {}
4555                 self.mydbapi = {}
4556                 self.trees = {}
4557                 self._trees_orig = trees
4558                 self.roots = {}
4559                 # Contains a filtered view of preferred packages that are selected
4560                 # from available repositories.
4561                 self._filtered_trees = {}
4562                 # Contains installed packages and new packages that have been added
4563                 # to the graph.
4564                 self._graph_trees = {}
4565                 # All Package instances
4566                 self._pkg_cache = {}
4567                 for myroot in trees:
4568                         self.trees[myroot] = {}
4569                         # Create a RootConfig instance that references
4570                         # the FakeVartree instead of the real one.
4571                         self.roots[myroot] = RootConfig(
4572                                 trees[myroot]["vartree"].settings,
4573                                 self.trees[myroot],
4574                                 trees[myroot]["root_config"].setconfig)
4575                         for tree in ("porttree", "bintree"):
4576                                 self.trees[myroot][tree] = trees[myroot][tree]
4577                         self.trees[myroot]["vartree"] = \
4578                                 FakeVartree(trees[myroot]["root_config"],
4579                                         pkg_cache=self._pkg_cache)
4580                         self.pkgsettings[myroot] = portage.config(
4581                                 clone=self.trees[myroot]["vartree"].settings)
4582                         self._slot_pkg_map[myroot] = {}
4583                         vardb = self.trees[myroot]["vartree"].dbapi
4584                         preload_installed_pkgs = "--nodeps" not in self.myopts and \
4585                                 "--buildpkgonly" not in self.myopts
4586                         # This fakedbapi instance will model the state that the vdb will
4587                         # have after new packages have been installed.
4588                         fakedb = PackageVirtualDbapi(vardb.settings)
4589                         if preload_installed_pkgs:
4590                                 for pkg in vardb:
4591                                         self.spinner.update()
4592                                         # This triggers metadata updates via FakeVartree.
4593                                         vardb.aux_get(pkg.cpv, [])
4594                                         fakedb.cpv_inject(pkg)
4595
4596                         # Now that the vardb state is cached in our FakeVartree,
4597                         # we won't be needing the real vartree cache for awhile.
4598                         # To make some room on the heap, clear the vardbapi
4599                         # caches.
4600                         trees[myroot]["vartree"].dbapi._clear_cache()
4601                         gc.collect()
4602
4603                         self.mydbapi[myroot] = fakedb
4604                         def graph_tree():
4605                                 pass
4606                         graph_tree.dbapi = fakedb
4607                         self._graph_trees[myroot] = {}
4608                         self._filtered_trees[myroot] = {}
4609                         # Substitute the graph tree for the vartree in dep_check() since we
4610                         # want atom selections to be consistent with package selections
4611                         # have already been made.
4612                         self._graph_trees[myroot]["porttree"]   = graph_tree
4613                         self._graph_trees[myroot]["vartree"]    = graph_tree
4614                         def filtered_tree():
4615                                 pass
4616                         filtered_tree.dbapi = self._dep_check_composite_db(self, myroot)
4617                         self._filtered_trees[myroot]["porttree"] = filtered_tree
4618
4619                         # Passing in graph_tree as the vartree here could lead to better
4620                         # atom selections in some cases by causing atoms for packages that
4621                         # have been added to the graph to be preferred over other choices.
4622                         # However, it can trigger atom selections that result in
4623                         # unresolvable direct circular dependencies. For example, this
4624                         # happens with gwydion-dylan which depends on either itself or
4625                         # gwydion-dylan-bin. In case gwydion-dylan is not yet installed,
4626                         # gwydion-dylan-bin needs to be selected in order to avoid a
4627                         # an unresolvable direct circular dependency.
4628                         #
4629                         # To solve the problem described above, pass in "graph_db" so that
4630                         # packages that have been added to the graph are distinguishable
4631                         # from other available packages and installed packages. Also, pass
4632                         # the parent package into self._select_atoms() calls so that
4633                         # unresolvable direct circular dependencies can be detected and
4634                         # avoided when possible.
4635                         self._filtered_trees[myroot]["graph_db"] = graph_tree.dbapi
4636                         self._filtered_trees[myroot]["vartree"] = self.trees[myroot]["vartree"]
4637
4638                         dbs = []
4639                         portdb = self.trees[myroot]["porttree"].dbapi
4640                         bindb  = self.trees[myroot]["bintree"].dbapi
4641                         vardb  = self.trees[myroot]["vartree"].dbapi
4642                         #               (db, pkg_type, built, installed, db_keys)
4643                         if "--usepkgonly" not in self.myopts:
4644                                 db_keys = list(portdb._aux_cache_keys)
4645                                 dbs.append((portdb, "ebuild", False, False, db_keys))
4646                         if "--usepkg" in self.myopts:
4647                                 db_keys = list(bindb._aux_cache_keys)
4648                                 dbs.append((bindb,  "binary", True, False, db_keys))
4649                         db_keys = list(trees[myroot]["vartree"].dbapi._aux_cache_keys)
4650                         dbs.append((vardb, "installed", True, True, db_keys))
4651                         self._filtered_trees[myroot]["dbs"] = dbs
4652                         if "--usepkg" in self.myopts:
4653                                 self.trees[myroot]["bintree"].populate(
4654                                         "--getbinpkg" in self.myopts,
4655                                         "--getbinpkgonly" in self.myopts)
4656                 del trees
4657
4658                 self.digraph=portage.digraph()
4659                 # contains all sets added to the graph
4660                 self._sets = {}
4661                 # contains atoms given as arguments
4662                 self._sets["args"] = InternalPackageSet()
4663                 # contains all atoms from all sets added to the graph, including
4664                 # atoms given as arguments
4665                 self._set_atoms = InternalPackageSet()
4666                 self._atom_arg_map = {}
4667                 # contains all nodes pulled in by self._set_atoms
4668                 self._set_nodes = set()
4669                 # Contains only Blocker -> Uninstall edges
4670                 self._blocker_uninstalls = digraph()
4671                 # Contains only Package -> Blocker edges
4672                 self._blocker_parents = digraph()
4673                 # Contains only irrelevant Package -> Blocker edges
4674                 self._irrelevant_blockers = digraph()
4675                 # Contains only unsolvable Package -> Blocker edges
4676                 self._unsolvable_blockers = digraph()
4677                 # Contains all Blocker -> Blocked Package edges
4678                 self._blocked_pkgs = digraph()
4679                 # Contains world packages that have been protected from
4680                 # uninstallation but may not have been added to the graph
4681                 # if the graph is not complete yet.
4682                 self._blocked_world_pkgs = {}
4683                 self._slot_collision_info = {}
4684                 # Slot collision nodes are not allowed to block other packages since
4685                 # blocker validation is only able to account for one package per slot.
4686                 self._slot_collision_nodes = set()
4687                 self._parent_atoms = {}
4688                 self._slot_conflict_parent_atoms = set()
4689                 self._serialized_tasks_cache = None
4690                 self._scheduler_graph = None
4691                 self._displayed_list = None
4692                 self._pprovided_args = []
4693                 self._missing_args = []
4694                 self._masked_installed = set()
4695                 self._unsatisfied_deps_for_display = []
4696                 self._unsatisfied_blockers_for_display = None
4697                 self._circular_deps_for_display = None
4698                 self._dep_stack = []
4699                 self._unsatisfied_deps = []
4700                 self._initially_unsatisfied_deps = []
4701                 self._ignored_deps = []
4702                 self._required_set_names = set(["system", "world"])
4703                 self._select_atoms = self._select_atoms_highest_available
4704                 self._select_package = self._select_pkg_highest_available
4705                 self._highest_pkg_cache = {}
4706
4707         def _show_slot_collision_notice(self):
4708                 """Show an informational message advising the user to mask one of the
4709                 the packages. In some cases it may be possible to resolve this
4710                 automatically, but support for backtracking (removal nodes that have
4711                 already been selected) will be required in order to handle all possible
4712                 cases.
4713                 """
4714
4715                 if not self._slot_collision_info:
4716                         return
4717
4718                 self._show_merge_list()
4719
4720                 msg = []
4721                 msg.append("\n!!! Multiple package instances within a single " + \
4722                         "package slot have been pulled\n")
4723                 msg.append("!!! into the dependency graph, resulting" + \
4724                         " in a slot conflict:\n\n")
4725                 indent = "  "
4726                 # Max number of parents shown, to avoid flooding the display.
4727                 max_parents = 3
4728                 explanation_columns = 70
4729                 explanations = 0
4730                 for (slot_atom, root), slot_nodes \
4731                         in self._slot_collision_info.iteritems():
4732                         msg.append(str(slot_atom))
4733                         msg.append("\n\n")
4734
4735                         for node in slot_nodes:
4736                                 msg.append(indent)
4737                                 msg.append(str(node))
4738                                 parent_atoms = self._parent_atoms.get(node)
4739                                 if parent_atoms:
4740                                         pruned_list = set()
4741                                         # Prefer conflict atoms over others.
4742                                         for parent_atom in parent_atoms:
4743                                                 if len(pruned_list) >= max_parents:
4744                                                         break
4745                                                 if parent_atom in self._slot_conflict_parent_atoms:
4746                                                         pruned_list.add(parent_atom)
4747
4748                                         # If this package was pulled in by conflict atoms then
4749                                         # show those alone since those are the most interesting.
4750                                         if not pruned_list:
4751                                                 # When generating the pruned list, prefer instances
4752                                                 # of DependencyArg over instances of Package.
4753                                                 for parent_atom in parent_atoms:
4754                                                         if len(pruned_list) >= max_parents:
4755                                                                 break
4756                                                         parent, atom = parent_atom
4757                                                         if isinstance(parent, DependencyArg):
4758                                                                 pruned_list.add(parent_atom)
4759                                                 # Prefer Packages instances that themselves have been
4760                                                 # pulled into collision slots.
4761                                                 for parent_atom in parent_atoms:
4762                                                         if len(pruned_list) >= max_parents:
4763                                                                 break
4764                                                         parent, atom = parent_atom
4765                                                         if isinstance(parent, Package) and \
4766                                                                 (parent.slot_atom, parent.root) \
4767                                                                 in self._slot_collision_info:
4768                                                                 pruned_list.add(parent_atom)
4769                                                 for parent_atom in parent_atoms:
4770                                                         if len(pruned_list) >= max_parents:
4771                                                                 break
4772                                                         pruned_list.add(parent_atom)
4773                                         omitted_parents = len(parent_atoms) - len(pruned_list)
4774                                         parent_atoms = pruned_list
4775                                         msg.append(" pulled in by\n")
4776                                         for parent_atom in parent_atoms:
4777                                                 parent, atom = parent_atom
4778                                                 msg.append(2*indent)
4779                                                 if isinstance(parent,
4780                                                         (PackageArg, AtomArg)):
4781                                                         # For PackageArg and AtomArg types, it's
4782                                                         # redundant to display the atom attribute.
4783                                                         msg.append(str(parent))
4784                                                 else:
4785                                                         # Display the specific atom from SetArg or
4786                                                         # Package types.
4787                                                         msg.append("%s required by %s" % (atom, parent))
4788                                                 msg.append("\n")
4789                                         if omitted_parents:
4790                                                 msg.append(2*indent)
4791                                                 msg.append("(and %d more)\n" % omitted_parents)
4792                                 else:
4793                                         msg.append(" (no parents)\n")
4794                                 msg.append("\n")
4795                         explanation = self._slot_conflict_explanation(slot_nodes)
4796                         if explanation:
4797                                 explanations += 1
4798                                 msg.append(indent + "Explanation:\n\n")
4799                                 for line in textwrap.wrap(explanation, explanation_columns):
4800                                         msg.append(2*indent + line + "\n")
4801                                 msg.append("\n")
4802                 msg.append("\n")
4803                 sys.stderr.write("".join(msg))
4804                 sys.stderr.flush()
4805
4806                 explanations_for_all = explanations == len(self._slot_collision_info)
4807
4808                 if explanations_for_all or "--quiet" in self.myopts:
4809                         return
4810
4811                 msg = []
4812                 msg.append("It may be possible to solve this problem ")
4813                 msg.append("by using package.mask to prevent one of ")
4814                 msg.append("those packages from being selected. ")
4815                 msg.append("However, it is also possible that conflicting ")
4816                 msg.append("dependencies exist such that they are impossible to ")
4817                 msg.append("satisfy simultaneously.  If such a conflict exists in ")
4818                 msg.append("the dependencies of two different packages, then those ")
4819                 msg.append("packages can not be installed simultaneously.")
4820
4821                 from formatter import AbstractFormatter, DumbWriter
4822                 f = AbstractFormatter(DumbWriter(sys.stderr, maxcol=72))
4823                 for x in msg:
4824                         f.add_flowing_data(x)
4825                 f.end_paragraph(1)
4826
4827                 msg = []
4828                 msg.append("For more information, see MASKED PACKAGES ")
4829                 msg.append("section in the emerge man page or refer ")
4830                 msg.append("to the Gentoo Handbook.")
4831                 for x in msg:
4832                         f.add_flowing_data(x)
4833                 f.end_paragraph(1)
4834                 f.writer.flush()
4835
4836         def _slot_conflict_explanation(self, slot_nodes):
4837                 """
4838                 When a slot conflict occurs due to USE deps, there are a few
4839                 different cases to consider:
4840
4841                 1) New USE are correctly set but --newuse wasn't requested so an
4842                    installed package with incorrect USE happened to get pulled
4843                    into graph before the new one.
4844
4845                 2) New USE are incorrectly set but an installed package has correct
4846                    USE so it got pulled into the graph, and a new instance also got
4847                    pulled in due to --newuse or an upgrade.
4848
4849                 3) Multiple USE deps exist that can't be satisfied simultaneously,
4850                    and multiple package instances got pulled into the same slot to
4851                    satisfy the conflicting deps.
4852
4853                 Currently, explanations and suggested courses of action are generated
4854                 for cases 1 and 2. Case 3 is too complex to give a useful suggestion.
4855                 """
4856
4857                 if len(slot_nodes) != 2:
4858                         # Suggestions are only implemented for
4859                         # conflicts between two packages.
4860                         return None
4861
4862                 all_conflict_atoms = self._slot_conflict_parent_atoms
4863                 matched_node = None
4864                 matched_atoms = None
4865                 unmatched_node = None
4866                 for node in slot_nodes:
4867                         parent_atoms = self._parent_atoms.get(node)
4868                         if not parent_atoms:
4869                                 # Normally, there are always parent atoms. If there are
4870                                 # none then something unexpected is happening and there's
4871                                 # currently no suggestion for this case.
4872                                 return None
4873                         conflict_atoms = all_conflict_atoms.intersection(parent_atoms)
4874                         for parent_atom in conflict_atoms:
4875                                 parent, atom = parent_atom
4876                                 if not atom.use:
4877                                         # Suggestions are currently only implemented for cases
4878                                         # in which all conflict atoms have USE deps.
4879                                         return None
4880                         if conflict_atoms:
4881                                 if matched_node is not None:
4882                                         # If conflict atoms match multiple nodes
4883                                         # then there's no suggestion.
4884                                         return None
4885                                 matched_node = node
4886                                 matched_atoms = conflict_atoms
4887                         else:
4888                                 if unmatched_node is not None:
4889                                         # Neither node is matched by conflict atoms, and
4890                                         # there is no suggestion for this case.
4891                                         return None
4892                                 unmatched_node = node
4893
4894                 if matched_node is None or unmatched_node is None:
4895                         # This shouldn't happen.
4896                         return None
4897
4898                 if unmatched_node.installed and not matched_node.installed and \
4899                         unmatched_node.cpv == matched_node.cpv:
4900                         # If the conflicting packages are the same version then
4901                         # --newuse should be all that's needed. If they are different
4902                         # versions then there's some other problem.
4903                         return "New USE are correctly set, but --newuse wasn't" + \
4904                                 " requested, so an installed package with incorrect USE " + \
4905                                 "happened to get pulled into the dependency graph. " + \
4906                                 "In order to solve " + \
4907                                 "this, either specify the --newuse option or explicitly " + \
4908                                 " reinstall '%s'." % matched_node.slot_atom
4909
4910                 if matched_node.installed and not unmatched_node.installed:
4911                         atoms = sorted(set(atom for parent, atom in matched_atoms))
4912                         explanation = ("New USE for '%s' are incorrectly set. " + \
4913                                 "In order to solve this, adjust USE to satisfy '%s'") % \
4914                                 (matched_node.slot_atom, atoms[0])
4915                         if len(atoms) > 1:
4916                                 for atom in atoms[1:-1]:
4917                                         explanation += ", '%s'" % (atom,)
4918                                 if len(atoms) > 2:
4919                                         explanation += ","
4920                                 explanation += " and '%s'" % (atoms[-1],)
4921                         explanation += "."
4922                         return explanation
4923
4924                 return None
4925
4926         def _process_slot_conflicts(self):
4927                 """
4928                 Process slot conflict data to identify specific atoms which
4929                 lead to conflict. These atoms only match a subset of the
4930                 packages that have been pulled into a given slot.
4931                 """
4932                 for (slot_atom, root), slot_nodes \
4933                         in self._slot_collision_info.iteritems():
4934
4935                         all_parent_atoms = set()
4936                         for pkg in slot_nodes:
4937                                 parent_atoms = self._parent_atoms.get(pkg)
4938                                 if not parent_atoms:
4939                                         continue
4940                                 all_parent_atoms.update(parent_atoms)
4941
4942                         for pkg in slot_nodes:
4943                                 parent_atoms = self._parent_atoms.get(pkg)
4944                                 if parent_atoms is None:
4945                                         parent_atoms = set()
4946                                         self._parent_atoms[pkg] = parent_atoms
4947                                 for parent_atom in all_parent_atoms:
4948                                         if parent_atom in parent_atoms:
4949                                                 continue
4950                                         # Use package set for matching since it will match via
4951                                         # PROVIDE when necessary, while match_from_list does not.
4952                                         parent, atom = parent_atom
4953                                         atom_set = InternalPackageSet(
4954                                                 initial_atoms=(atom,))
4955                                         if atom_set.findAtomForPackage(pkg):
4956                                                 parent_atoms.add(parent_atom)
4957                                         else:
4958                                                 self._slot_conflict_parent_atoms.add(parent_atom)
4959
4960         def _reinstall_for_flags(self, forced_flags,
4961                 orig_use, orig_iuse, cur_use, cur_iuse):
4962                 """Return a set of flags that trigger reinstallation, or None if there
4963                 are no such flags."""
4964                 if "--newuse" in self.myopts:
4965                         flags = set(orig_iuse.symmetric_difference(
4966                                 cur_iuse).difference(forced_flags))
4967                         flags.update(orig_iuse.intersection(orig_use).symmetric_difference(
4968                                 cur_iuse.intersection(cur_use)))
4969                         if flags:
4970                                 return flags
4971                 elif "changed-use" == self.myopts.get("--reinstall"):
4972                         flags = orig_iuse.intersection(orig_use).symmetric_difference(
4973                                 cur_iuse.intersection(cur_use))
4974                         if flags:
4975                                 return flags
4976                 return None
4977
4978         def _create_graph(self, allow_unsatisfied=False):
4979                 dep_stack = self._dep_stack
4980                 while dep_stack:
4981                         self.spinner.update()
4982                         dep = dep_stack.pop()
4983                         if isinstance(dep, Package):
4984                                 if not self._add_pkg_deps(dep,
4985                                         allow_unsatisfied=allow_unsatisfied):
4986                                         return 0
4987                                 continue
4988                         if not self._add_dep(dep, allow_unsatisfied=allow_unsatisfied):
4989                                 return 0
4990                 return 1
4991
4992         def _add_dep(self, dep, allow_unsatisfied=False):
4993                 debug = "--debug" in self.myopts
4994                 buildpkgonly = "--buildpkgonly" in self.myopts
4995                 nodeps = "--nodeps" in self.myopts
4996                 empty = "empty" in self.myparams
4997                 deep = "deep" in self.myparams
4998                 update = "--update" in self.myopts and dep.depth <= 1
4999                 if dep.blocker:
5000                         if not buildpkgonly and \
5001                                 not nodeps and \
5002                                 dep.parent not in self._slot_collision_nodes:
5003                                 if dep.parent.onlydeps:
5004                                         # It's safe to ignore blockers if the
5005                                         # parent is an --onlydeps node.
5006                                         return 1
5007                                 # The blocker applies to the root where
5008                                 # the parent is or will be installed.
5009                                 blocker = Blocker(atom=dep.atom,
5010                                         eapi=dep.parent.metadata["EAPI"],
5011                                         root=dep.parent.root)
5012                                 self._blocker_parents.add(blocker, dep.parent)
5013                         return 1
5014                 dep_pkg, existing_node = self._select_package(dep.root, dep.atom,
5015                         onlydeps=dep.onlydeps)
5016                 if not dep_pkg:
5017                         if dep.priority.optional:
5018                                 # This could be an unecessary build-time dep
5019                                 # pulled in by --with-bdeps=y.
5020                                 return 1
5021                         if allow_unsatisfied:
5022                                 self._unsatisfied_deps.append(dep)
5023                                 return 1
5024                         self._unsatisfied_deps_for_display.append(
5025                                 ((dep.root, dep.atom), {"myparent":dep.parent}))
5026                         return 0
5027                 # In some cases, dep_check will return deps that shouldn't
5028                 # be proccessed any further, so they are identified and
5029                 # discarded here. Try to discard as few as possible since
5030                 # discarded dependencies reduce the amount of information
5031                 # available for optimization of merge order.
5032                 if dep.priority.satisfied and \
5033                         not dep_pkg.installed and \
5034                         not (existing_node or empty or deep or update):
5035                         myarg = None
5036                         if dep.root == self.target_root:
5037                                 try:
5038                                         myarg = self._iter_atoms_for_pkg(dep_pkg).next()
5039                                 except StopIteration:
5040                                         pass
5041                                 except portage.exception.InvalidDependString:
5042                                         if not dep_pkg.installed:
5043                                                 # This shouldn't happen since the package
5044                                                 # should have been masked.
5045                                                 raise
5046                         if not myarg:
5047                                 self._ignored_deps.append(dep)
5048                                 return 1
5049
5050                 if not self._add_pkg(dep_pkg, dep):
5051                         return 0
5052                 return 1
5053
5054         def _add_pkg(self, pkg, dep):
5055                 myparent = None
5056                 priority = None
5057                 depth = 0
5058                 if dep is None:
5059                         dep = Dependency()
5060                 else:
5061                         myparent = dep.parent
5062                         priority = dep.priority
5063                         depth = dep.depth
5064                 if priority is None:
5065                         priority = DepPriority()
5066                 """
5067                 Fills the digraph with nodes comprised of packages to merge.
5068                 mybigkey is the package spec of the package to merge.
5069                 myparent is the package depending on mybigkey ( or None )
5070                 addme = Should we add this package to the digraph or are we just looking at it's deps?
5071                         Think --onlydeps, we need to ignore packages in that case.
5072                 #stuff to add:
5073                 #SLOT-aware emerge
5074                 #IUSE-aware emerge -> USE DEP aware depgraph
5075                 #"no downgrade" emerge
5076                 """
5077                 # Ensure that the dependencies of the same package
5078                 # are never processed more than once.
5079                 previously_added = pkg in self.digraph
5080
5081                 # select the correct /var database that we'll be checking against
5082                 vardbapi = self.trees[pkg.root]["vartree"].dbapi
5083                 pkgsettings = self.pkgsettings[pkg.root]
5084
5085                 arg_atoms = None
5086                 if True:
5087                         try:
5088                                 arg_atoms = list(self._iter_atoms_for_pkg(pkg))
5089                         except portage.exception.InvalidDependString, e:
5090                                 if not pkg.installed:
5091                                         show_invalid_depstring_notice(
5092                                                 pkg, pkg.metadata["PROVIDE"], str(e))
5093                                         return 0
5094                                 del e
5095
5096                 if not pkg.onlydeps:
5097                         if not pkg.installed and \
5098                                 "empty" not in self.myparams and \
5099                                 vardbapi.match(pkg.slot_atom):
5100                                 # Increase the priority of dependencies on packages that
5101                                 # are being rebuilt. This optimizes merge order so that
5102                                 # dependencies are rebuilt/updated as soon as possible,
5103                                 # which is needed especially when emerge is called by
5104                                 # revdep-rebuild since dependencies may be affected by ABI
5105                                 # breakage that has rendered them useless. Don't adjust
5106                                 # priority here when in "empty" mode since all packages
5107                                 # are being merged in that case.
5108                                 priority.rebuild = True
5109
5110                         existing_node = self._slot_pkg_map[pkg.root].get(pkg.slot_atom)
5111                         slot_collision = False
5112                         if existing_node:
5113                                 existing_node_matches = pkg.cpv == existing_node.cpv
5114                                 if existing_node_matches and \
5115                                         pkg != existing_node and \
5116                                         dep.atom is not None:
5117                                         # Use package set for matching since it will match via
5118                                         # PROVIDE when necessary, while match_from_list does not.
5119                                         atom_set = InternalPackageSet(initial_atoms=[dep.atom])
5120                                         if not atom_set.findAtomForPackage(existing_node):
5121                                                 existing_node_matches = False
5122                                 if existing_node_matches:
5123                                         # The existing node can be reused.
5124                                         if arg_atoms:
5125                                                 for parent_atom in arg_atoms:
5126                                                         parent, atom = parent_atom
5127                                                         self.digraph.add(existing_node, parent,
5128                                                                 priority=priority)
5129                                                         self._add_parent_atom(existing_node, parent_atom)
5130                                         # If a direct circular dependency is not an unsatisfied
5131                                         # buildtime dependency then drop it here since otherwise
5132                                         # it can skew the merge order calculation in an unwanted
5133                                         # way.
5134                                         if existing_node != myparent or \
5135                                                 (priority.buildtime and not priority.satisfied):
5136                                                 self.digraph.addnode(existing_node, myparent,
5137                                                         priority=priority)
5138                                                 if dep.atom is not None and dep.parent is not None:
5139                                                         self._add_parent_atom(existing_node,
5140                                                                 (dep.parent, dep.atom))
5141                                         return 1
5142                                 else:
5143
5144                                         # A slot collision has occurred.  Sometimes this coincides
5145                                         # with unresolvable blockers, so the slot collision will be
5146                                         # shown later if there are no unresolvable blockers.
5147                                         self._add_slot_conflict(pkg)
5148                                         slot_collision = True
5149
5150                         if slot_collision:
5151                                 # Now add this node to the graph so that self.display()
5152                                 # can show use flags and --tree portage.output.  This node is
5153                                 # only being partially added to the graph.  It must not be
5154                                 # allowed to interfere with the other nodes that have been
5155                                 # added.  Do not overwrite data for existing nodes in
5156                                 # self.mydbapi since that data will be used for blocker
5157                                 # validation.
5158                                 # Even though the graph is now invalid, continue to process
5159                                 # dependencies so that things like --fetchonly can still
5160                                 # function despite collisions.
5161                                 pass
5162                         elif not previously_added:
5163                                 self._slot_pkg_map[pkg.root][pkg.slot_atom] = pkg
5164                                 self.mydbapi[pkg.root].cpv_inject(pkg)
5165                                 self._filtered_trees[pkg.root]["porttree"].dbapi._clear_cache()
5166
5167                         if not pkg.installed:
5168                                 # Allow this package to satisfy old-style virtuals in case it
5169                                 # doesn't already. Any pre-existing providers will be preferred
5170                                 # over this one.
5171                                 try:
5172                                         pkgsettings.setinst(pkg.cpv, pkg.metadata)
5173                                         # For consistency, also update the global virtuals.
5174                                         settings = self.roots[pkg.root].settings
5175                                         settings.unlock()
5176                                         settings.setinst(pkg.cpv, pkg.metadata)
5177                                         settings.lock()
5178                                 except portage.exception.InvalidDependString, e:
5179                                         show_invalid_depstring_notice(
5180                                                 pkg, pkg.metadata["PROVIDE"], str(e))
5181                                         del e
5182                                         return 0
5183
5184                 if arg_atoms:
5185                         self._set_nodes.add(pkg)
5186
5187                 # Do this even when addme is False (--onlydeps) so that the
5188                 # parent/child relationship is always known in case
5189                 # self._show_slot_collision_notice() needs to be called later.
5190                 self.digraph.add(pkg, myparent, priority=priority)
5191                 if dep.atom is not None and dep.parent is not None:
5192                         self._add_parent_atom(pkg, (dep.parent, dep.atom))
5193
5194                 if arg_atoms:
5195                         for parent_atom in arg_atoms:
5196                                 parent, atom = parent_atom
5197                                 self.digraph.add(pkg, parent, priority=priority)
5198                                 self._add_parent_atom(pkg, parent_atom)
5199
5200                 """ This section determines whether we go deeper into dependencies or not.
5201                     We want to go deeper on a few occasions:
5202                     Installing package A, we need to make sure package A's deps are met.
5203                     emerge --deep <pkgspec>; we need to recursively check dependencies of pkgspec
5204                     If we are in --nodeps (no recursion) mode, we obviously only check 1 level of dependencies.
5205                 """
5206                 dep_stack = self._dep_stack
5207                 if "recurse" not in self.myparams:
5208                         return 1
5209                 elif pkg.installed and \
5210                         "deep" not in self.myparams:
5211                         dep_stack = self._ignored_deps
5212
5213                 self.spinner.update()
5214
5215                 if arg_atoms:
5216                         depth = 0
5217                 pkg.depth = depth
5218                 if not previously_added:
5219                         dep_stack.append(pkg)
5220                 return 1
5221
5222         def _add_parent_atom(self, pkg, parent_atom):
5223                 parent_atoms = self._parent_atoms.get(pkg)
5224                 if parent_atoms is None:
5225                         parent_atoms = set()
5226                         self._parent_atoms[pkg] = parent_atoms
5227                 parent_atoms.add(parent_atom)
5228
5229         def _add_slot_conflict(self, pkg):
5230                 self._slot_collision_nodes.add(pkg)
5231                 slot_key = (pkg.slot_atom, pkg.root)
5232                 slot_nodes = self._slot_collision_info.get(slot_key)
5233                 if slot_nodes is None:
5234                         slot_nodes = set()
5235                         slot_nodes.add(self._slot_pkg_map[pkg.root][pkg.slot_atom])
5236                         self._slot_collision_info[slot_key] = slot_nodes
5237                 slot_nodes.add(pkg)
5238
5239         def _add_pkg_deps(self, pkg, allow_unsatisfied=False):
5240
5241                 mytype = pkg.type_name
5242                 myroot = pkg.root
5243                 mykey = pkg.cpv
5244                 metadata = pkg.metadata
5245                 myuse = pkg.use.enabled
5246                 jbigkey = pkg
5247                 depth = pkg.depth + 1
5248                 removal_action = "remove" in self.myparams
5249
5250                 edepend={}
5251                 depkeys = ["DEPEND","RDEPEND","PDEPEND"]
5252                 for k in depkeys:
5253                         edepend[k] = metadata[k]
5254
5255                 if not pkg.built and \
5256                         "--buildpkgonly" in self.myopts and \
5257                         "deep" not in self.myparams and \
5258                         "empty" not in self.myparams:
5259                         edepend["RDEPEND"] = ""
5260                         edepend["PDEPEND"] = ""
5261                 bdeps_optional = False
5262
5263                 if pkg.built and not removal_action:
5264                         if self.myopts.get("--with-bdeps", "n") == "y":
5265                                 # Pull in build time deps as requested, but marked them as
5266                                 # "optional" since they are not strictly required. This allows
5267                                 # more freedom in the merge order calculation for solving
5268                                 # circular dependencies. Don't convert to PDEPEND since that
5269                                 # could make --with-bdeps=y less effective if it is used to
5270                                 # adjust merge order to prevent built_with_use() calls from
5271                                 # failing.
5272                                 bdeps_optional = True
5273                         else:
5274                                 # built packages do not have build time dependencies.
5275                                 edepend["DEPEND"] = ""
5276
5277                 if removal_action and self.myopts.get("--with-bdeps", "y") == "n":
5278                         edepend["DEPEND"] = ""
5279
5280                 bdeps_root = "/"
5281                 root_deps = self.myopts.get("--root-deps")
5282                 if root_deps is not None:
5283                         if root_deps is True:
5284                                 bdeps_root = myroot
5285                         elif root_deps == "rdeps":
5286                                 edepend["DEPEND"] = ""
5287
5288                 deps = (
5289                         (bdeps_root, edepend["DEPEND"],
5290                                 self._priority(buildtime=(not bdeps_optional),
5291                                 optional=bdeps_optional)),
5292                         (myroot, edepend["RDEPEND"], self._priority(runtime=True)),
5293                         (myroot, edepend["PDEPEND"], self._priority(runtime_post=True))
5294                 )
5295
5296                 debug = "--debug" in self.myopts
5297                 strict = mytype != "installed"
5298                 try:
5299                         for dep_root, dep_string, dep_priority in deps:
5300                                 if not dep_string:
5301                                         continue
5302                                 if debug:
5303                                         print
5304                                         print "Parent:   ", jbigkey
5305                                         print "Depstring:", dep_string
5306                                         print "Priority:", dep_priority
5307                                 vardb = self.roots[dep_root].trees["vartree"].dbapi
5308                                 try:
5309                                         selected_atoms = self._select_atoms(dep_root,
5310                                                 dep_string, myuse=myuse, parent=pkg, strict=strict,
5311                                                 priority=dep_priority)
5312                                 except portage.exception.InvalidDependString, e:
5313                                         show_invalid_depstring_notice(jbigkey, dep_string, str(e))
5314                                         return 0
5315                                 if debug:
5316                                         print "Candidates:", selected_atoms
5317
5318                                 for atom in selected_atoms:
5319                                         try:
5320
5321                                                 atom = portage.dep.Atom(atom)
5322
5323                                                 mypriority = dep_priority.copy()
5324                                                 if not atom.blocker and vardb.match(atom):
5325                                                         mypriority.satisfied = True
5326
5327                                                 if not self._add_dep(Dependency(atom=atom,
5328                                                         blocker=atom.blocker, depth=depth, parent=pkg,
5329                                                         priority=mypriority, root=dep_root),
5330                                                         allow_unsatisfied=allow_unsatisfied):
5331                                                         return 0
5332
5333                                         except portage.exception.InvalidAtom, e:
5334                                                 show_invalid_depstring_notice(
5335                                                         pkg, dep_string, str(e))
5336                                                 del e
5337                                                 if not pkg.installed:
5338                                                         return 0
5339
5340                                 if debug:
5341                                         print "Exiting...", jbigkey
5342                 except portage.exception.AmbiguousPackageName, e:
5343                         pkgs = e.args[0]
5344                         portage.writemsg("\n\n!!! An atom in the dependencies " + \
5345                                 "is not fully-qualified. Multiple matches:\n\n", noiselevel=-1)
5346                         for cpv in pkgs:
5347                                 portage.writemsg("    %s\n" % cpv, noiselevel=-1)
5348                         portage.writemsg("\n", noiselevel=-1)
5349                         if mytype == "binary":
5350                                 portage.writemsg(
5351                                         "!!! This binary package cannot be installed: '%s'\n" % \
5352                                         mykey, noiselevel=-1)
5353                         elif mytype == "ebuild":
5354                                 portdb = self.roots[myroot].trees["porttree"].dbapi
5355                                 myebuild, mylocation = portdb.findname2(mykey)
5356                                 portage.writemsg("!!! This ebuild cannot be installed: " + \
5357                                         "'%s'\n" % myebuild, noiselevel=-1)
5358                         portage.writemsg("!!! Please notify the package maintainer " + \
5359                                 "that atoms must be fully-qualified.\n", noiselevel=-1)
5360                         return 0
5361                 return 1
5362
5363         def _priority(self, **kwargs):
5364                 if "remove" in self.myparams:
5365                         priority_constructor = UnmergeDepPriority
5366                 else:
5367                         priority_constructor = DepPriority
5368                 return priority_constructor(**kwargs)
5369
5370         def _dep_expand(self, root_config, atom_without_category):
5371                 """
5372                 @param root_config: a root config instance
5373                 @type root_config: RootConfig
5374                 @param atom_without_category: an atom without a category component
5375                 @type atom_without_category: String
5376                 @rtype: list
5377                 @returns: a list of atoms containing categories (possibly empty)
5378                 """
5379                 null_cp = portage.dep_getkey(insert_category_into_atom(
5380                         atom_without_category, "null"))
5381                 cat, atom_pn = portage.catsplit(null_cp)
5382
5383                 dbs = self._filtered_trees[root_config.root]["dbs"]
5384                 categories = set()
5385                 for db, pkg_type, built, installed, db_keys in dbs:
5386                         for cat in db.categories:
5387                                 if db.cp_list("%s/%s" % (cat, atom_pn)):
5388                                         categories.add(cat)
5389
5390                 deps = []
5391                 for cat in categories:
5392                         deps.append(insert_category_into_atom(
5393                                 atom_without_category, cat))
5394                 return deps
5395
5396         def _have_new_virt(self, root, atom_cp):
5397                 ret = False
5398                 for db, pkg_type, built, installed, db_keys in \
5399                         self._filtered_trees[root]["dbs"]:
5400                         if db.cp_list(atom_cp):
5401                                 ret = True
5402                                 break
5403                 return ret
5404
5405         def _iter_atoms_for_pkg(self, pkg):
5406                 # TODO: add multiple $ROOT support
5407                 if pkg.root != self.target_root:
5408                         return
5409                 atom_arg_map = self._atom_arg_map
5410                 root_config = self.roots[pkg.root]
5411                 for atom in self._set_atoms.iterAtomsForPackage(pkg):
5412                         atom_cp = portage.dep_getkey(atom)
5413                         if atom_cp != pkg.cp and \
5414                                 self._have_new_virt(pkg.root, atom_cp):
5415                                 continue
5416                         visible_pkgs = root_config.visible_pkgs.match_pkgs(atom)
5417                         visible_pkgs.reverse() # descending order
5418                         higher_slot = None
5419                         for visible_pkg in visible_pkgs:
5420                                 if visible_pkg.cp != atom_cp:
5421                                         continue
5422                                 if pkg >= visible_pkg:
5423                                         # This is descending order, and we're not
5424                                         # interested in any versions <= pkg given.
5425                                         break
5426                                 if pkg.slot_atom != visible_pkg.slot_atom:
5427                                         higher_slot = visible_pkg
5428                                         break
5429                         if higher_slot is not None:
5430                                 continue
5431                         for arg in atom_arg_map[(atom, pkg.root)]:
5432                                 if isinstance(arg, PackageArg) and \
5433                                         arg.package != pkg:
5434                                         continue
5435                                 yield arg, atom
5436
5437         def select_files(self, myfiles):
5438                 """Given a list of .tbz2s, .ebuilds sets, and deps, create the
5439                 appropriate depgraph and return a favorite list."""
5440                 debug = "--debug" in self.myopts
5441                 root_config = self.roots[self.target_root]
5442                 sets = root_config.sets
5443                 getSetAtoms = root_config.setconfig.getSetAtoms
5444                 myfavorites=[]
5445                 myroot = self.target_root
5446                 dbs = self._filtered_trees[myroot]["dbs"]
5447                 vardb = self.trees[myroot]["vartree"].dbapi
5448                 real_vardb = self._trees_orig[myroot]["vartree"].dbapi
5449                 portdb = self.trees[myroot]["porttree"].dbapi
5450                 bindb = self.trees[myroot]["bintree"].dbapi
5451                 pkgsettings = self.pkgsettings[myroot]
5452                 args = []
5453                 onlydeps = "--onlydeps" in self.myopts
5454                 lookup_owners = []
5455                 for x in myfiles:
5456                         ext = os.path.splitext(x)[1]
5457                         if ext==".tbz2":
5458                                 if not os.path.exists(x):
5459                                         if os.path.exists(
5460                                                 os.path.join(pkgsettings["PKGDIR"], "All", x)):
5461                                                 x = os.path.join(pkgsettings["PKGDIR"], "All", x)
5462                                         elif os.path.exists(
5463                                                 os.path.join(pkgsettings["PKGDIR"], x)):
5464                                                 x = os.path.join(pkgsettings["PKGDIR"], x)
5465                                         else:
5466                                                 print "\n\n!!! Binary package '"+str(x)+"' does not exist."
5467                                                 print "!!! Please ensure the tbz2 exists as specified.\n"
5468                                                 return 0, myfavorites
5469                                 mytbz2=portage.xpak.tbz2(x)
5470                                 mykey=mytbz2.getelements("CATEGORY")[0]+"/"+os.path.splitext(os.path.basename(x))[0]
5471                                 if os.path.realpath(x) != \
5472                                         os.path.realpath(self.trees[myroot]["bintree"].getname(mykey)):
5473                                         print colorize("BAD", "\n*** You need to adjust PKGDIR to emerge this package.\n")
5474                                         return 0, myfavorites
5475                                 db_keys = list(bindb._aux_cache_keys)
5476                                 metadata = izip(db_keys, bindb.aux_get(mykey, db_keys))
5477                                 pkg = Package(type_name="binary", root_config=root_config,
5478                                         cpv=mykey, built=True, metadata=metadata,
5479                                         onlydeps=onlydeps)
5480                                 self._pkg_cache[pkg] = pkg
5481                                 args.append(PackageArg(arg=x, package=pkg,
5482                                         root_config=root_config))
5483                         elif ext==".ebuild":
5484                                 ebuild_path = portage.util.normalize_path(os.path.abspath(x))
5485                                 pkgdir = os.path.dirname(ebuild_path)
5486                                 tree_root = os.path.dirname(os.path.dirname(pkgdir))
5487                                 cp = pkgdir[len(tree_root)+1:]
5488                                 e = portage.exception.PackageNotFound(
5489                                         ("%s is not in a valid portage tree " + \
5490                                         "hierarchy or does not exist") % x)
5491                                 if not portage.isvalidatom(cp):
5492                                         raise e
5493                                 cat = portage.catsplit(cp)[0]
5494                                 mykey = cat + "/" + os.path.basename(ebuild_path[:-7])
5495                                 if not portage.isvalidatom("="+mykey):
5496                                         raise e
5497                                 ebuild_path = portdb.findname(mykey)
5498                                 if ebuild_path:
5499                                         if ebuild_path != os.path.join(os.path.realpath(tree_root),
5500                                                 cp, os.path.basename(ebuild_path)):
5501                                                 print colorize("BAD", "\n*** You need to adjust PORTDIR or PORTDIR_OVERLAY to emerge this package.\n")
5502                                                 return 0, myfavorites
5503                                         if mykey not in portdb.xmatch(
5504                                                 "match-visible", portage.dep_getkey(mykey)):
5505                                                 print colorize("BAD", "\n*** You are emerging a masked package. It is MUCH better to use")
5506                                                 print colorize("BAD", "*** /etc/portage/package.* to accomplish this. See portage(5) man")
5507                                                 print colorize("BAD", "*** page for details.")
5508                                                 countdown(int(self.settings["EMERGE_WARNING_DELAY"]),
5509                                                         "Continuing...")
5510                                 else:
5511                                         raise portage.exception.PackageNotFound(
5512                                                 "%s is not in a valid portage tree hierarchy or does not exist" % x)
5513                                 db_keys = list(portdb._aux_cache_keys)
5514                                 metadata = izip(db_keys, portdb.aux_get(mykey, db_keys))
5515                                 pkg = Package(type_name="ebuild", root_config=root_config,
5516                                         cpv=mykey, metadata=metadata, onlydeps=onlydeps)
5517                                 pkgsettings.setcpv(pkg)
5518                                 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
5519                                 pkg.metadata['CHOST'] = pkgsettings.get('CHOST', '')
5520                                 self._pkg_cache[pkg] = pkg
5521                                 args.append(PackageArg(arg=x, package=pkg,
5522                                         root_config=root_config))
5523                         elif x.startswith(os.path.sep):
5524                                 if not x.startswith(myroot):
5525                                         portage.writemsg(("\n\n!!! '%s' does not start with" + \
5526                                                 " $ROOT.\n") % x, noiselevel=-1)
5527                                         return 0, []
5528                                 # Queue these up since it's most efficient to handle
5529                                 # multiple files in a single iter_owners() call.
5530                                 lookup_owners.append(x)
5531                         else:
5532                                 if x in ("system", "world"):
5533                                         x = SETPREFIX + x
5534                                 if x.startswith(SETPREFIX):
5535                                         s = x[len(SETPREFIX):]
5536                                         if s not in sets:
5537                                                 raise portage.exception.PackageSetNotFound(s)
5538                                         if s in self._sets:
5539                                                 continue
5540                                         # Recursively expand sets so that containment tests in
5541                                         # self._get_parent_sets() properly match atoms in nested
5542                                         # sets (like if world contains system).
5543                                         expanded_set = InternalPackageSet(
5544                                                 initial_atoms=getSetAtoms(s))
5545                                         self._sets[s] = expanded_set
5546                                         args.append(SetArg(arg=x, set=expanded_set,
5547                                                 root_config=root_config))
5548                                         continue
5549                                 if not is_valid_package_atom(x):
5550                                         portage.writemsg("\n\n!!! '%s' is not a valid package atom.\n" % x,
5551                                                 noiselevel=-1)
5552                                         portage.writemsg("!!! Please check ebuild(5) for full details.\n")
5553                                         portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
5554                                         return (0,[])
5555                                 # Don't expand categories or old-style virtuals here unless
5556                                 # necessary. Expansion of old-style virtuals here causes at
5557                                 # least the following problems:
5558                                 #   1) It's more difficult to determine which set(s) an atom
5559                                 #      came from, if any.
5560                                 #   2) It takes away freedom from the resolver to choose other
5561                                 #      possible expansions when necessary.
5562                                 if "/" in x:
5563                                         args.append(AtomArg(arg=x, atom=x,
5564                                                 root_config=root_config))
5565                                         continue
5566                                 expanded_atoms = self._dep_expand(root_config, x)
5567                                 installed_cp_set = set()
5568                                 for atom in expanded_atoms:
5569                                         atom_cp = portage.dep_getkey(atom)
5570                                         if vardb.cp_list(atom_cp):
5571                                                 installed_cp_set.add(atom_cp)
5572                                 if len(expanded_atoms) > 1 and len(installed_cp_set) == 1:
5573                                         installed_cp = iter(installed_cp_set).next()
5574                                         expanded_atoms = [atom for atom in expanded_atoms \
5575                                                 if portage.dep_getkey(atom) == installed_cp]
5576
5577                                 if len(expanded_atoms) > 1:
5578                                         print
5579                                         print
5580                                         ambiguous_package_name(x, expanded_atoms, root_config,
5581                                                 self.spinner, self.myopts)
5582                                         return False, myfavorites
5583                                 if expanded_atoms:
5584                                         atom = expanded_atoms[0]
5585                                 else:
5586                                         null_atom = insert_category_into_atom(x, "null")
5587                                         null_cp = portage.dep_getkey(null_atom)
5588                                         cat, atom_pn = portage.catsplit(null_cp)
5589                                         virts_p = root_config.settings.get_virts_p().get(atom_pn)
5590                                         if virts_p:
5591                                                 # Allow the depgraph to choose which virtual.
5592                                                 atom = insert_category_into_atom(x, "virtual")
5593                                         else:
5594                                                 atom = insert_category_into_atom(x, "null")
5595
5596                                 args.append(AtomArg(arg=x, atom=atom,
5597                                         root_config=root_config))
5598
5599                 if lookup_owners:
5600                         relative_paths = []
5601                         search_for_multiple = False
5602                         if len(lookup_owners) > 1:
5603                                 search_for_multiple = True
5604
5605                         for x in lookup_owners:
5606                                 if not search_for_multiple and os.path.isdir(x):
5607                                         search_for_multiple = True
5608                                 relative_paths.append(x[len(myroot):])
5609
5610                         owners = set()
5611                         for pkg, relative_path in \
5612                                 real_vardb._owners.iter_owners(relative_paths):
5613                                 owners.add(pkg.mycpv)
5614                                 if not search_for_multiple:
5615                                         break
5616
5617                         if not owners:
5618                                 portage.writemsg(("\n\n!!! '%s' is not claimed " + \
5619                                         "by any package.\n") % lookup_owners[0], noiselevel=-1)
5620                                 return 0, []
5621
5622                         for cpv in owners:
5623                                 slot = vardb.aux_get(cpv, ["SLOT"])[0]
5624                                 if not slot:
5625                                         # portage now masks packages with missing slot, but it's
5626                                         # possible that one was installed by an older version
5627                                         atom = portage.cpv_getkey(cpv)
5628                                 else:
5629                                         atom = "%s:%s" % (portage.cpv_getkey(cpv), slot)
5630                                 args.append(AtomArg(arg=atom, atom=atom,
5631                                         root_config=root_config))
5632
5633                 if "--update" in self.myopts:
5634                         # In some cases, the greedy slots behavior can pull in a slot that
5635                         # the user would want to uninstall due to it being blocked by a
5636                         # newer version in a different slot. Therefore, it's necessary to
5637                         # detect and discard any that should be uninstalled. Each time
5638                         # that arguments are updated, package selections are repeated in
5639                         # order to ensure consistency with the current arguments:
5640                         #
5641                         #  1) Initialize args
5642                         #  2) Select packages and generate initial greedy atoms
5643                         #  3) Update args with greedy atoms
5644                         #  4) Select packages and generate greedy atoms again, while
5645                         #     accounting for any blockers between selected packages
5646                         #  5) Update args with revised greedy atoms
5647
5648                         self._set_args(args)
5649                         greedy_args = []
5650                         for arg in args:
5651                                 greedy_args.append(arg)
5652                                 if not isinstance(arg, AtomArg):
5653                                         continue
5654                                 for atom in self._greedy_slots(arg.root_config, arg.atom):
5655                                         greedy_args.append(
5656                                                 AtomArg(arg=arg.arg, atom=atom,
5657                                                         root_config=arg.root_config))
5658
5659                         self._set_args(greedy_args)
5660                         del greedy_args
5661
5662                         # Revise greedy atoms, accounting for any blockers
5663                         # between selected packages.
5664                         revised_greedy_args = []
5665                         for arg in args:
5666                                 revised_greedy_args.append(arg)
5667                                 if not isinstance(arg, AtomArg):
5668                                         continue
5669                                 for atom in self._greedy_slots(arg.root_config, arg.atom,
5670                                         blocker_lookahead=True):
5671                                         revised_greedy_args.append(
5672                                                 AtomArg(arg=arg.arg, atom=atom,
5673                                                         root_config=arg.root_config))
5674                         args = revised_greedy_args
5675                         del revised_greedy_args
5676
5677                 self._set_args(args)
5678
5679                 myfavorites = set(myfavorites)
5680                 for arg in args:
5681                         if isinstance(arg, (AtomArg, PackageArg)):
5682                                 myfavorites.add(arg.atom)
5683                         elif isinstance(arg, SetArg):
5684                                 myfavorites.add(arg.arg)
5685                 myfavorites = list(myfavorites)
5686
5687                 pprovideddict = pkgsettings.pprovideddict
5688                 if debug:
5689                         portage.writemsg("\n", noiselevel=-1)
5690                 # Order needs to be preserved since a feature of --nodeps
5691                 # is to allow the user to force a specific merge order.
5692                 args.reverse()
5693                 while args:
5694                         arg = args.pop()
5695                         for atom in arg.set:
5696                                 self.spinner.update()
5697                                 dep = Dependency(atom=atom, onlydeps=onlydeps,
5698                                         root=myroot, parent=arg)
5699                                 atom_cp = portage.dep_getkey(atom)
5700                                 try:
5701                                         pprovided = pprovideddict.get(portage.dep_getkey(atom))
5702                                         if pprovided and portage.match_from_list(atom, pprovided):
5703                                                 # A provided package has been specified on the command line.
5704                                                 self._pprovided_args.append((arg, atom))
5705                                                 continue
5706                                         if isinstance(arg, PackageArg):
5707                                                 if not self._add_pkg(arg.package, dep) or \
5708                                                         not self._create_graph():
5709                                                         sys.stderr.write(("\n\n!!! Problem resolving " + \
5710                                                                 "dependencies for %s\n") % arg.arg)
5711                                                         return 0, myfavorites
5712                                                 continue
5713                                         if debug:
5714                                                 portage.writemsg("      Arg: %s\n     Atom: %s\n" % \
5715                                                         (arg, atom), noiselevel=-1)
5716                                         pkg, existing_node = self._select_package(
5717                                                 myroot, atom, onlydeps=onlydeps)
5718                                         if not pkg:
5719                                                 if not (isinstance(arg, SetArg) and \
5720                                                         arg.name in ("system", "world")):
5721                                                         self._unsatisfied_deps_for_display.append(
5722                                                                 ((myroot, atom), {}))
5723                                                         return 0, myfavorites
5724                                                 self._missing_args.append((arg, atom))
5725                                                 continue
5726                                         if atom_cp != pkg.cp:
5727                                                 # For old-style virtuals, we need to repeat the
5728                                                 # package.provided check against the selected package.
5729                                                 expanded_atom = atom.replace(atom_cp, pkg.cp)
5730                                                 pprovided = pprovideddict.get(pkg.cp)
5731                                                 if pprovided and \
5732                                                         portage.match_from_list(expanded_atom, pprovided):
5733                                                         # A provided package has been
5734                                                         # specified on the command line.
5735                                                         self._pprovided_args.append((arg, atom))
5736                                                         continue
5737                                         if pkg.installed and "selective" not in self.myparams:
5738                                                 self._unsatisfied_deps_for_display.append(
5739                                                         ((myroot, atom), {}))
5740                                                 # Previous behavior was to bail out in this case, but
5741                                                 # since the dep is satisfied by the installed package,
5742                                                 # it's more friendly to continue building the graph
5743                                                 # and just show a warning message. Therefore, only bail
5744                                                 # out here if the atom is not from either the system or
5745                                                 # world set.
5746                                                 if not (isinstance(arg, SetArg) and \
5747                                                         arg.name in ("system", "world")):
5748                                                         return 0, myfavorites
5749
5750                                         # Add the selected package to the graph as soon as possible
5751                                         # so that later dep_check() calls can use it as feedback
5752                                         # for making more consistent atom selections.
5753                                         if not self._add_pkg(pkg, dep):
5754                                                 if isinstance(arg, SetArg):
5755                                                         sys.stderr.write(("\n\n!!! Problem resolving " + \
5756                                                                 "dependencies for %s from %s\n") % \
5757                                                                 (atom, arg.arg))
5758                                                 else:
5759                                                         sys.stderr.write(("\n\n!!! Problem resolving " + \
5760                                                                 "dependencies for %s\n") % atom)
5761                                                 return 0, myfavorites
5762
5763                                 except portage.exception.MissingSignature, e:
5764                                         portage.writemsg("\n\n!!! A missing gpg signature is preventing portage from calculating the\n")
5765                                         portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
5766                                         portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
5767                                         portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
5768                                         portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
5769                                         return 0, myfavorites
5770                                 except portage.exception.InvalidSignature, e:
5771                                         portage.writemsg("\n\n!!! An invalid gpg signature is preventing portage from calculating the\n")
5772                                         portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
5773                                         portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
5774                                         portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
5775                                         portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
5776                                         return 0, myfavorites
5777                                 except SystemExit, e:
5778                                         raise # Needed else can't exit
5779                                 except Exception, e:
5780                                         print >> sys.stderr, "\n\n!!! Problem in '%s' dependencies." % atom
5781                                         print >> sys.stderr, "!!!", str(e), getattr(e, "__module__", None)
5782                                         raise
5783
5784                 # Now that the root packages have been added to the graph,
5785                 # process the dependencies.
5786                 if not self._create_graph():
5787                         return 0, myfavorites
5788
5789                 missing=0
5790                 if "--usepkgonly" in self.myopts:
5791                         for xs in self.digraph.all_nodes():
5792                                 if not isinstance(xs, Package):
5793                                         continue
5794                                 if len(xs) >= 4 and xs[0] != "binary" and xs[3] == "merge":
5795                                         if missing == 0:
5796                                                 print
5797                                         missing += 1
5798                                         print "Missing binary for:",xs[2]
5799
5800                 try:
5801                         self.altlist()
5802                 except self._unknown_internal_error:
5803                         return False, myfavorites
5804
5805                 # We're true here unless we are missing binaries.
5806                 return (not missing,myfavorites)
5807
5808         def _set_args(self, args):
5809                 """
5810                 Create the "args" package set from atoms and packages given as
5811                 arguments. This method can be called multiple times if necessary.
5812                 The package selection cache is automatically invalidated, since
5813                 arguments influence package selections.
5814                 """
5815                 args_set = self._sets["args"]
5816                 args_set.clear()
5817                 for arg in args:
5818                         if not isinstance(arg, (AtomArg, PackageArg)):
5819                                 continue
5820                         atom = arg.atom
5821                         if atom in args_set:
5822                                 continue
5823                         args_set.add(atom)
5824
5825                 self._set_atoms.clear()
5826                 self._set_atoms.update(chain(*self._sets.itervalues()))
5827                 atom_arg_map = self._atom_arg_map
5828                 atom_arg_map.clear()
5829                 for arg in args:
5830                         for atom in arg.set:
5831                                 atom_key = (atom, arg.root_config.root)
5832                                 refs = atom_arg_map.get(atom_key)
5833                                 if refs is None:
5834                                         refs = []
5835                                         atom_arg_map[atom_key] = refs
5836                                         if arg not in refs:
5837                                                 refs.append(arg)
5838
5839                 # Invalidate the package selection cache, since
5840                 # arguments influence package selections.
5841                 self._highest_pkg_cache.clear()
5842                 for trees in self._filtered_trees.itervalues():
5843                         trees["porttree"].dbapi._clear_cache()
5844
5845         def _greedy_slots(self, root_config, atom, blocker_lookahead=False):
5846                 """
5847                 Return a list of slot atoms corresponding to installed slots that
5848                 differ from the slot of the highest visible match. When
5849                 blocker_lookahead is True, slot atoms that would trigger a blocker
5850                 conflict are automatically discarded, potentially allowing automatic
5851                 uninstallation of older slots when appropriate.
5852                 """
5853                 highest_pkg, in_graph = self._select_package(root_config.root, atom)
5854                 if highest_pkg is None:
5855                         return []
5856                 vardb = root_config.trees["vartree"].dbapi
5857                 slots = set()
5858                 for cpv in vardb.match(atom):
5859                         # don't mix new virtuals with old virtuals
5860                         if portage.cpv_getkey(cpv) == highest_pkg.cp:
5861                                 slots.add(vardb.aux_get(cpv, ["SLOT"])[0])
5862
5863                 slots.add(highest_pkg.metadata["SLOT"])
5864                 if len(slots) == 1:
5865                         return []
5866                 greedy_pkgs = []
5867                 slots.remove(highest_pkg.metadata["SLOT"])
5868                 while slots:
5869                         slot = slots.pop()
5870                         slot_atom = portage.dep.Atom("%s:%s" % (highest_pkg.cp, slot))
5871                         pkg, in_graph = self._select_package(root_config.root, slot_atom)
5872                         if pkg is not None and \
5873                                 pkg.cp == highest_pkg.cp and pkg < highest_pkg:
5874                                 greedy_pkgs.append(pkg)
5875                 if not greedy_pkgs:
5876                         return []
5877                 if not blocker_lookahead:
5878                         return [pkg.slot_atom for pkg in greedy_pkgs]
5879
5880                 blockers = {}
5881                 blocker_dep_keys = ["DEPEND", "PDEPEND", "RDEPEND"]
5882                 for pkg in greedy_pkgs + [highest_pkg]:
5883                         dep_str = " ".join(pkg.metadata[k] for k in blocker_dep_keys)
5884                         try:
5885                                 atoms = self._select_atoms(
5886                                         pkg.root, dep_str, pkg.use.enabled,
5887                                         parent=pkg, strict=True)
5888                         except portage.exception.InvalidDependString:
5889                                 continue
5890                         blocker_atoms = (x for x in atoms if x.blocker)
5891                         blockers[pkg] = InternalPackageSet(initial_atoms=blocker_atoms)
5892
5893                 if highest_pkg not in blockers:
5894                         return []
5895
5896                 # filter packages with invalid deps
5897                 greedy_pkgs = [pkg for pkg in greedy_pkgs if pkg in blockers]
5898
5899                 # filter packages that conflict with highest_pkg
5900                 greedy_pkgs = [pkg for pkg in greedy_pkgs if not \
5901                         (blockers[highest_pkg].findAtomForPackage(pkg) or \
5902                         blockers[pkg].findAtomForPackage(highest_pkg))]
5903
5904                 if not greedy_pkgs:
5905                         return []
5906
5907                 # If two packages conflict, discard the lower version.
5908                 discard_pkgs = set()
5909                 greedy_pkgs.sort(reverse=True)
5910                 for i in xrange(len(greedy_pkgs) - 1):
5911                         pkg1 = greedy_pkgs[i]
5912                         if pkg1 in discard_pkgs:
5913                                 continue
5914                         for j in xrange(i + 1, len(greedy_pkgs)):
5915                                 pkg2 = greedy_pkgs[j]
5916                                 if pkg2 in discard_pkgs:
5917                                         continue
5918                                 if blockers[pkg1].findAtomForPackage(pkg2) or \
5919                                         blockers[pkg2].findAtomForPackage(pkg1):
5920                                         # pkg1 > pkg2
5921                                         discard_pkgs.add(pkg2)
5922
5923                 return [pkg.slot_atom for pkg in greedy_pkgs \
5924                         if pkg not in discard_pkgs]
5925
5926         def _select_atoms_from_graph(self, *pargs, **kwargs):
5927                 """
5928                 Prefer atoms matching packages that have already been
5929                 added to the graph or those that are installed and have
5930                 not been scheduled for replacement.
5931                 """
5932                 kwargs["trees"] = self._graph_trees
5933                 return self._select_atoms_highest_available(*pargs, **kwargs)
5934
5935         def _select_atoms_highest_available(self, root, depstring,
5936                 myuse=None, parent=None, strict=True, trees=None, priority=None):
5937                 """This will raise InvalidDependString if necessary. If trees is
5938                 None then self._filtered_trees is used."""
5939                 pkgsettings = self.pkgsettings[root]
5940                 if trees is None:
5941                         trees = self._filtered_trees
5942                 if not getattr(priority, "buildtime", False):
5943                         # The parent should only be passed to dep_check() for buildtime
5944                         # dependencies since that's the only case when it's appropriate
5945                         # to trigger the circular dependency avoidance code which uses it.
5946                         # It's important not to trigger the same circular dependency
5947                         # avoidance code for runtime dependencies since it's not needed
5948                         # and it can promote an incorrect package choice.
5949                         parent = None
5950                 if True:
5951                         try:
5952                                 if parent is not None:
5953                                         trees[root]["parent"] = parent
5954                                 if not strict:
5955                                         portage.dep._dep_check_strict = False
5956                                 mycheck = portage.dep_check(depstring, None,
5957                                         pkgsettings, myuse=myuse,
5958                                         myroot=root, trees=trees)
5959                         finally:
5960                                 if parent is not None:
5961                                         trees[root].pop("parent")
5962                                 portage.dep._dep_check_strict = True
5963                         if not mycheck[0]:
5964                                 raise portage.exception.InvalidDependString(mycheck[1])
5965                         selected_atoms = mycheck[1]
5966                 return selected_atoms
5967
5968         def _show_unsatisfied_dep(self, root, atom, myparent=None, arg=None):
5969                 atom = portage.dep.Atom(atom)
5970                 atom_set = InternalPackageSet(initial_atoms=(atom,))
5971                 atom_without_use = atom
5972                 if atom.use:
5973                         atom_without_use = portage.dep.remove_slot(atom)
5974                         if atom.slot:
5975                                 atom_without_use += ":" + atom.slot
5976                         atom_without_use = portage.dep.Atom(atom_without_use)
5977                 xinfo = '"%s"' % atom
5978                 if arg:
5979                         xinfo='"%s"' % arg
5980                 # Discard null/ from failed cpv_expand category expansion.
5981                 xinfo = xinfo.replace("null/", "")
5982                 masked_packages = []
5983                 missing_use = []
5984                 masked_pkg_instances = set()
5985                 missing_licenses = []
5986                 have_eapi_mask = False
5987                 pkgsettings = self.pkgsettings[root]
5988                 implicit_iuse = pkgsettings._get_implicit_iuse()
5989                 root_config = self.roots[root]
5990                 portdb = self.roots[root].trees["porttree"].dbapi
5991                 dbs = self._filtered_trees[root]["dbs"]
5992                 for db, pkg_type, built, installed, db_keys in dbs:
5993                         if installed:
5994                                 continue
5995                         match = db.match
5996                         if hasattr(db, "xmatch"):
5997                                 cpv_list = db.xmatch("match-all", atom_without_use)
5998                         else:
5999                                 cpv_list = db.match(atom_without_use)
6000                         # descending order
6001                         cpv_list.reverse()
6002                         for cpv in cpv_list:
6003                                 metadata, mreasons  = get_mask_info(root_config, cpv,
6004                                         pkgsettings, db, pkg_type, built, installed, db_keys)
6005                                 if metadata is not None:
6006                                         pkg = Package(built=built, cpv=cpv,
6007                                                 installed=installed, metadata=metadata,
6008                                                 root_config=root_config)
6009                                         if pkg.cp != atom.cp:
6010                                                 # A cpv can be returned from dbapi.match() as an
6011                                                 # old-style virtual match even in cases when the
6012                                                 # package does not actually PROVIDE the virtual.
6013                                                 # Filter out any such false matches here.
6014                                                 if not atom_set.findAtomForPackage(pkg):
6015                                                         continue
6016                                         if mreasons:
6017                                                 masked_pkg_instances.add(pkg)
6018                                         if atom.use:
6019                                                 missing_use.append(pkg)
6020                                                 if not mreasons:
6021                                                         continue
6022                                 masked_packages.append(
6023                                         (root_config, pkgsettings, cpv, metadata, mreasons))
6024
6025                 missing_use_reasons = []
6026                 missing_iuse_reasons = []
6027                 for pkg in missing_use:
6028                         use = pkg.use.enabled
6029                         iuse = implicit_iuse.union(re.escape(x) for x in pkg.iuse.all)
6030                         iuse_re = re.compile("^(%s)$" % "|".join(iuse))
6031                         missing_iuse = []
6032                         for x in atom.use.required:
6033                                 if iuse_re.match(x) is None:
6034                                         missing_iuse.append(x)
6035                         mreasons = []
6036                         if missing_iuse:
6037                                 mreasons.append("Missing IUSE: %s" % " ".join(missing_iuse))
6038                                 missing_iuse_reasons.append((pkg, mreasons))
6039                         else:
6040                                 need_enable = sorted(atom.use.enabled.difference(use))
6041                                 need_disable = sorted(atom.use.disabled.intersection(use))
6042                                 if need_enable or need_disable:
6043                                         changes = []
6044                                         changes.extend(colorize("red", "+" + x) \
6045                                                 for x in need_enable)
6046                                         changes.extend(colorize("blue", "-" + x) \
6047                                                 for x in need_disable)
6048                                         mreasons.append("Change USE: %s" % " ".join(changes))
6049                                         missing_use_reasons.append((pkg, mreasons))
6050
6051                 unmasked_use_reasons = [(pkg, mreasons) for (pkg, mreasons) \
6052                         in missing_use_reasons if pkg not in masked_pkg_instances]
6053
6054                 unmasked_iuse_reasons = [(pkg, mreasons) for (pkg, mreasons) \
6055                         in missing_iuse_reasons if pkg not in masked_pkg_instances]
6056
6057                 show_missing_use = False
6058                 if unmasked_use_reasons:
6059                         # Only show the latest version.
6060                         show_missing_use = unmasked_use_reasons[:1]
6061                 elif unmasked_iuse_reasons:
6062                         if missing_use_reasons:
6063                                 # All packages with required IUSE are masked,
6064                                 # so display a normal masking message.
6065                                 pass
6066                         else:
6067                                 show_missing_use = unmasked_iuse_reasons
6068
6069                 if show_missing_use:
6070                         print "\nemerge: there are no ebuilds built with USE flags to satisfy "+green(xinfo)+"."
6071                         print "!!! One of the following packages is required to complete your request:"
6072                         for pkg, mreasons in show_missing_use:
6073                                 print "- "+pkg.cpv+" ("+", ".join(mreasons)+")"
6074
6075                 elif masked_packages:
6076                         print "\n!!! " + \
6077                                 colorize("BAD", "All ebuilds that could satisfy ") + \
6078                                 colorize("INFORM", xinfo) + \
6079                                 colorize("BAD", " have been masked.")
6080                         print "!!! One of the following masked packages is required to complete your request:"
6081                         have_eapi_mask = show_masked_packages(masked_packages)
6082                         if have_eapi_mask:
6083                                 print
6084                                 msg = ("The current version of portage supports " + \
6085                                         "EAPI '%s'. You must upgrade to a newer version" + \
6086                                         " of portage before EAPI masked packages can" + \
6087                                         " be installed.") % portage.const.EAPI
6088                                 from textwrap import wrap
6089                                 for line in wrap(msg, 75):
6090                                         print line
6091                         print
6092                         show_mask_docs()
6093                 else:
6094                         print "\nemerge: there are no ebuilds to satisfy "+green(xinfo)+"."
6095
6096                 # Show parent nodes and the argument that pulled them in.
6097                 traversed_nodes = set()
6098                 node = myparent
6099                 msg = []
6100                 while node is not None:
6101                         traversed_nodes.add(node)
6102                         msg.append('(dependency required by "%s" [%s])' % \
6103                                 (colorize('INFORM', str(node.cpv)), node.type_name))
6104                         # When traversing to parents, prefer arguments over packages
6105                         # since arguments are root nodes. Never traverse the same
6106                         # package twice, in order to prevent an infinite loop.
6107                         selected_parent = None
6108                         for parent in self.digraph.parent_nodes(node):
6109                                 if isinstance(parent, DependencyArg):
6110                                         msg.append('(dependency required by "%s" [argument])' % \
6111                                                 (colorize('INFORM', str(parent))))
6112                                         selected_parent = None
6113                                         break
6114                                 if parent not in traversed_nodes:
6115                                         selected_parent = parent
6116                         node = selected_parent
6117                 for line in msg:
6118                         print line
6119
6120                 print
6121
6122         def _select_pkg_highest_available(self, root, atom, onlydeps=False):
6123                 cache_key = (root, atom, onlydeps)
6124                 ret = self._highest_pkg_cache.get(cache_key)
6125                 if ret is not None:
6126                         pkg, existing = ret
6127                         if pkg and not existing:
6128                                 existing = self._slot_pkg_map[root].get(pkg.slot_atom)
6129                                 if existing and existing == pkg:
6130                                         # Update the cache to reflect that the
6131                                         # package has been added to the graph.
6132                                         ret = pkg, pkg
6133                                         self._highest_pkg_cache[cache_key] = ret
6134                         return ret
6135                 ret = self._select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps)
6136                 self._highest_pkg_cache[cache_key] = ret
6137                 pkg, existing = ret
6138                 if pkg is not None:
6139                         settings = pkg.root_config.settings
6140                         if visible(settings, pkg) and not (pkg.installed and \
6141                                 settings._getMissingKeywords(pkg.cpv, pkg.metadata)):
6142                                 pkg.root_config.visible_pkgs.cpv_inject(pkg)
6143                 return ret
6144
6145         def _select_pkg_highest_available_imp(self, root, atom, onlydeps=False):
6146                 root_config = self.roots[root]
6147                 pkgsettings = self.pkgsettings[root]
6148                 dbs = self._filtered_trees[root]["dbs"]
6149                 vardb = self.roots[root].trees["vartree"].dbapi
6150                 portdb = self.roots[root].trees["porttree"].dbapi
6151                 # List of acceptable packages, ordered by type preference.
6152                 matched_packages = []
6153                 highest_version = None
6154                 if not isinstance(atom, portage.dep.Atom):
6155                         atom = portage.dep.Atom(atom)
6156                 atom_cp = atom.cp
6157                 atom_set = InternalPackageSet(initial_atoms=(atom,))
6158                 existing_node = None
6159                 myeb = None
6160                 usepkgonly = "--usepkgonly" in self.myopts
6161                 empty = "empty" in self.myparams
6162                 selective = "selective" in self.myparams
6163                 reinstall = False
6164                 noreplace = "--noreplace" in self.myopts
6165                 # Behavior of the "selective" parameter depends on
6166                 # whether or not a package matches an argument atom.
6167                 # If an installed package provides an old-style
6168                 # virtual that is no longer provided by an available
6169                 # package, the installed package may match an argument
6170                 # atom even though none of the available packages do.
6171                 # Therefore, "selective" logic does not consider
6172                 # whether or not an installed package matches an
6173                 # argument atom. It only considers whether or not
6174                 # available packages match argument atoms, which is
6175                 # represented by the found_available_arg flag.
6176                 found_available_arg = False
6177                 for find_existing_node in True, False:
6178                         if existing_node:
6179                                 break
6180                         for db, pkg_type, built, installed, db_keys in dbs:
6181                                 if existing_node:
6182                                         break
6183                                 if installed and not find_existing_node:
6184                                         want_reinstall = reinstall or empty or \
6185                                                 (found_available_arg and not selective)
6186                                         if want_reinstall and matched_packages:
6187                                                 continue
6188                                 if hasattr(db, "xmatch"):
6189                                         cpv_list = db.xmatch("match-all", atom)
6190                                 else:
6191                                         cpv_list = db.match(atom)
6192
6193                                 # USE=multislot can make an installed package appear as if
6194                                 # it doesn't satisfy a slot dependency. Rebuilding the ebuild
6195                                 # won't do any good as long as USE=multislot is enabled since
6196                                 # the newly built package still won't have the expected slot.
6197                                 # Therefore, assume that such SLOT dependencies are already
6198                                 # satisfied rather than forcing a rebuild.
6199                                 if installed and not cpv_list and atom.slot:
6200                                         for cpv in db.match(atom.cp):
6201                                                 slot_available = False
6202                                                 for other_db, other_type, other_built, \
6203                                                         other_installed, other_keys in dbs:
6204                                                         try:
6205                                                                 if atom.slot == \
6206                                                                         other_db.aux_get(cpv, ["SLOT"])[0]:
6207                                                                         slot_available = True
6208                                                                         break
6209                                                         except KeyError:
6210                                                                 pass
6211                                                 if not slot_available:
6212                                                         continue
6213                                                 inst_pkg = self._pkg(cpv, "installed",
6214                                                         root_config, installed=installed)
6215                                                 # Remove the slot from the atom and verify that
6216                                                 # the package matches the resulting atom.
6217                                                 atom_without_slot = portage.dep.remove_slot(atom)
6218                                                 if atom.use:
6219                                                         atom_without_slot += str(atom.use)
6220                                                 atom_without_slot = portage.dep.Atom(atom_without_slot)
6221                                                 if portage.match_from_list(
6222                                                         atom_without_slot, [inst_pkg]):
6223                                                         cpv_list = [inst_pkg.cpv]
6224                                                 break
6225
6226                                 if not cpv_list:
6227                                         continue
6228                                 pkg_status = "merge"
6229                                 if installed or onlydeps:
6230                                         pkg_status = "nomerge"
6231                                 # descending order
6232                                 cpv_list.reverse()
6233                                 for cpv in cpv_list:
6234                                         # Make --noreplace take precedence over --newuse.
6235                                         if not installed and noreplace and \
6236                                                 cpv in vardb.match(atom):
6237                                                 # If the installed version is masked, it may
6238                                                 # be necessary to look at lower versions,
6239                                                 # in case there is a visible downgrade.
6240                                                 continue
6241                                         reinstall_for_flags = None
6242                                         cache_key = (pkg_type, root, cpv, pkg_status)
6243                                         calculated_use = True
6244                                         pkg = self._pkg_cache.get(cache_key)
6245                                         if pkg is None:
6246                                                 calculated_use = False
6247                                                 try:
6248                                                         metadata = izip(db_keys, db.aux_get(cpv, db_keys))
6249                                                 except KeyError:
6250                                                         continue
6251                                                 pkg = Package(built=built, cpv=cpv,
6252                                                         installed=installed, metadata=metadata,
6253                                                         onlydeps=onlydeps, root_config=root_config,
6254                                                         type_name=pkg_type)
6255                                                 metadata = pkg.metadata
6256                                                 if not built:
6257                                                         metadata['CHOST'] = pkgsettings.get('CHOST', '')
6258                                                 if not built and ("?" in metadata["LICENSE"] or \
6259                                                         "?" in metadata["PROVIDE"]):
6260                                                         # This is avoided whenever possible because
6261                                                         # it's expensive. It only needs to be done here
6262                                                         # if it has an effect on visibility.
6263                                                         pkgsettings.setcpv(pkg)
6264                                                         metadata["USE"] = pkgsettings["PORTAGE_USE"]
6265                                                         calculated_use = True
6266                                                 self._pkg_cache[pkg] = pkg
6267
6268                                         if not installed or (built and matched_packages):
6269                                                 # Only enforce visibility on installed packages
6270                                                 # if there is at least one other visible package
6271                                                 # available. By filtering installed masked packages
6272                                                 # here, packages that have been masked since they
6273                                                 # were installed can be automatically downgraded
6274                                                 # to an unmasked version.
6275                                                 try:
6276                                                         if not visible(pkgsettings, pkg):
6277                                                                 continue
6278                                                 except portage.exception.InvalidDependString:
6279                                                         if not installed:
6280                                                                 continue
6281
6282                                                 # Enable upgrade or downgrade to a version
6283                                                 # with visible KEYWORDS when the installed
6284                                                 # version is masked by KEYWORDS, but never
6285                                                 # reinstall the same exact version only due
6286                                                 # to a KEYWORDS mask.
6287                                                 if built and matched_packages:
6288
6289                                                         different_version = None
6290                                                         for avail_pkg in matched_packages:
6291                                                                 if not portage.dep.cpvequal(
6292                                                                         pkg.cpv, avail_pkg.cpv):
6293                                                                         different_version = avail_pkg
6294                                                                         break
6295                                                         if different_version is not None:
6296
6297                                                                 if installed and \
6298                                                                         pkgsettings._getMissingKeywords(
6299                                                                         pkg.cpv, pkg.metadata):
6300                                                                         continue
6301
6302                                                                 # If the ebuild no longer exists or it's
6303                                                                 # keywords have been dropped, reject built
6304                                                                 # instances (installed or binary).
6305                                                                 # If --usepkgonly is enabled, assume that
6306                                                                 # the ebuild status should be ignored.
6307                                                                 if not usepkgonly:
6308                                                                         try:
6309                                                                                 pkg_eb = self._pkg(
6310                                                                                         pkg.cpv, "ebuild", root_config)
6311                                                                         except portage.exception.PackageNotFound:
6312                                                                                 continue
6313                                                                         else:
6314                                                                                 if not visible(pkgsettings, pkg_eb):
6315                                                                                         continue
6316
6317                                         if not pkg.built and not calculated_use:
6318                                                 # This is avoided whenever possible because
6319                                                 # it's expensive.
6320                                                 pkgsettings.setcpv(pkg)
6321                                                 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
6322
6323                                         if pkg.cp != atom.cp:
6324                                                 # A cpv can be returned from dbapi.match() as an
6325                                                 # old-style virtual match even in cases when the
6326                                                 # package does not actually PROVIDE the virtual.
6327                                                 # Filter out any such false matches here.
6328                                                 if not atom_set.findAtomForPackage(pkg):
6329                                                         continue
6330
6331                                         myarg = None
6332                                         if root == self.target_root:
6333                                                 try:
6334                                                         # Ebuild USE must have been calculated prior
6335                                                         # to this point, in case atoms have USE deps.
6336                                                         myarg = self._iter_atoms_for_pkg(pkg).next()
6337                                                 except StopIteration:
6338                                                         pass
6339                                                 except portage.exception.InvalidDependString:
6340                                                         if not installed:
6341                                                                 # masked by corruption
6342                                                                 continue
6343                                         if not installed and myarg:
6344                                                 found_available_arg = True
6345
6346                                         if atom.use and not pkg.built:
6347                                                 use = pkg.use.enabled
6348                                                 if atom.use.enabled.difference(use):
6349                                                         continue
6350                                                 if atom.use.disabled.intersection(use):
6351                                                         continue
6352                                         if pkg.cp == atom_cp:
6353                                                 if highest_version is None:
6354                                                         highest_version = pkg
6355                                                 elif pkg > highest_version:
6356                                                         highest_version = pkg
6357                                         # At this point, we've found the highest visible
6358                                         # match from the current repo. Any lower versions
6359                                         # from this repo are ignored, so this so the loop
6360                                         # will always end with a break statement below
6361                                         # this point.
6362                                         if find_existing_node:
6363                                                 e_pkg = self._slot_pkg_map[root].get(pkg.slot_atom)
6364                                                 if not e_pkg:
6365                                                         break
6366                                                 if portage.dep.match_from_list(atom, [e_pkg]):
6367                                                         if highest_version and \
6368                                                                 e_pkg.cp == atom_cp and \
6369                                                                 e_pkg < highest_version and \
6370                                                                 e_pkg.slot_atom != highest_version.slot_atom:
6371                                                                 # There is a higher version available in a
6372                                                                 # different slot, so this existing node is
6373                                                                 # irrelevant.
6374                                                                 pass
6375                                                         else:
6376                                                                 matched_packages.append(e_pkg)
6377                                                                 existing_node = e_pkg
6378                                                 break
6379                                         # Compare built package to current config and
6380                                         # reject the built package if necessary.
6381                                         if built and not installed and \
6382                                                 ("--newuse" in self.myopts or \
6383                                                 "--reinstall" in self.myopts):
6384                                                 iuses = pkg.iuse.all
6385                                                 old_use = pkg.use.enabled
6386                                                 if myeb:
6387                                                         pkgsettings.setcpv(myeb)
6388                                                 else:
6389                                                         pkgsettings.setcpv(pkg)
6390                                                 now_use = pkgsettings["PORTAGE_USE"].split()
6391                                                 forced_flags = set()
6392                                                 forced_flags.update(pkgsettings.useforce)
6393                                                 forced_flags.update(pkgsettings.usemask)
6394                                                 cur_iuse = iuses
6395                                                 if myeb and not usepkgonly:
6396                                                         cur_iuse = myeb.iuse.all
6397                                                 if self._reinstall_for_flags(forced_flags,
6398                                                         old_use, iuses,
6399                                                         now_use, cur_iuse):
6400                                                         break
6401                                         # Compare current config to installed package
6402                                         # and do not reinstall if possible.
6403                                         if not installed and \
6404                                                 ("--newuse" in self.myopts or \
6405                                                 "--reinstall" in self.myopts) and \
6406                                                 cpv in vardb.match(atom):
6407                                                 pkgsettings.setcpv(pkg)
6408                                                 forced_flags = set()
6409                                                 forced_flags.update(pkgsettings.useforce)
6410                                                 forced_flags.update(pkgsettings.usemask)
6411                                                 old_use = vardb.aux_get(cpv, ["USE"])[0].split()
6412                                                 old_iuse = set(filter_iuse_defaults(
6413                                                         vardb.aux_get(cpv, ["IUSE"])[0].split()))
6414                                                 cur_use = pkg.use.enabled
6415                                                 cur_iuse = pkg.iuse.all
6416                                                 reinstall_for_flags = \
6417                                                         self._reinstall_for_flags(
6418                                                         forced_flags, old_use, old_iuse,
6419                                                         cur_use, cur_iuse)
6420                                                 if reinstall_for_flags:
6421                                                         reinstall = True
6422                                         if not built:
6423                                                 myeb = pkg
6424                                         matched_packages.append(pkg)
6425                                         if reinstall_for_flags:
6426                                                 self._reinstall_nodes[pkg] = \
6427                                                         reinstall_for_flags
6428                                         break
6429
6430                 if not matched_packages:
6431                         return None, None
6432
6433                 if "--debug" in self.myopts:
6434                         for pkg in matched_packages:
6435                                 portage.writemsg("%s %s\n" % \
6436                                         ((pkg.type_name + ":").rjust(10), pkg.cpv), noiselevel=-1)
6437
6438                 # Filter out any old-style virtual matches if they are
6439                 # mixed with new-style virtual matches.
6440                 cp = portage.dep_getkey(atom)
6441                 if len(matched_packages) > 1 and \
6442                         "virtual" == portage.catsplit(cp)[0]:
6443                         for pkg in matched_packages:
6444                                 if pkg.cp != cp:
6445                                         continue
6446                                 # Got a new-style virtual, so filter
6447                                 # out any old-style virtuals.
6448                                 matched_packages = [pkg for pkg in matched_packages \
6449                                         if pkg.cp == cp]
6450                                 break
6451
6452                 if len(matched_packages) > 1:
6453                         bestmatch = portage.best(
6454                                 [pkg.cpv for pkg in matched_packages])
6455                         matched_packages = [pkg for pkg in matched_packages \
6456                                 if portage.dep.cpvequal(pkg.cpv, bestmatch)]
6457
6458                 # ordered by type preference ("ebuild" type is the last resort)
6459                 return  matched_packages[-1], existing_node
6460
6461         def _select_pkg_from_graph(self, root, atom, onlydeps=False):
6462                 """
6463                 Select packages that have already been added to the graph or
6464                 those that are installed and have not been scheduled for
6465                 replacement.
6466                 """
6467                 graph_db = self._graph_trees[root]["porttree"].dbapi
6468                 matches = graph_db.match_pkgs(atom)
6469                 if not matches:
6470                         return None, None
6471                 pkg = matches[-1] # highest match
6472                 in_graph = self._slot_pkg_map[root].get(pkg.slot_atom)
6473                 return pkg, in_graph
6474
6475         def _complete_graph(self):
6476                 """
6477                 Add any deep dependencies of required sets (args, system, world) that
6478                 have not been pulled into the graph yet. This ensures that the graph
6479                 is consistent such that initially satisfied deep dependencies are not
6480                 broken in the new graph. Initially unsatisfied dependencies are
6481                 irrelevant since we only want to avoid breaking dependencies that are
6482                 intially satisfied.
6483
6484                 Since this method can consume enough time to disturb users, it is
6485                 currently only enabled by the --complete-graph option.
6486                 """
6487                 if "--buildpkgonly" in self.myopts or \
6488                         "recurse" not in self.myparams:
6489                         return 1
6490
6491                 if "complete" not in self.myparams:
6492                         # Skip this to avoid consuming enough time to disturb users.
6493                         return 1
6494
6495                 # Put the depgraph into a mode that causes it to only
6496                 # select packages that have already been added to the
6497                 # graph or those that are installed and have not been
6498                 # scheduled for replacement. Also, toggle the "deep"
6499                 # parameter so that all dependencies are traversed and
6500                 # accounted for.
6501                 self._select_atoms = self._select_atoms_from_graph
6502                 self._select_package = self._select_pkg_from_graph
6503                 already_deep = "deep" in self.myparams
6504                 if not already_deep:
6505                         self.myparams.add("deep")
6506
6507                 for root in self.roots:
6508                         required_set_names = self._required_set_names.copy()
6509                         if root == self.target_root and \
6510                                 (already_deep or "empty" in self.myparams):
6511                                 required_set_names.difference_update(self._sets)
6512                         if not required_set_names and not self._ignored_deps:
6513                                 continue
6514                         root_config = self.roots[root]
6515                         setconfig = root_config.setconfig
6516                         args = []
6517                         # Reuse existing SetArg instances when available.
6518                         for arg in self.digraph.root_nodes():
6519                                 if not isinstance(arg, SetArg):
6520                                         continue
6521                                 if arg.root_config != root_config:
6522                                         continue
6523                                 if arg.name in required_set_names:
6524                                         args.append(arg)
6525                                         required_set_names.remove(arg.name)
6526                         # Create new SetArg instances only when necessary.
6527                         for s in required_set_names:
6528                                 expanded_set = InternalPackageSet(
6529                                         initial_atoms=setconfig.getSetAtoms(s))
6530                                 atom = SETPREFIX + s
6531                                 args.append(SetArg(arg=atom, set=expanded_set,
6532                                         root_config=root_config))
6533                         vardb = root_config.trees["vartree"].dbapi
6534                         for arg in args:
6535                                 for atom in arg.set:
6536                                         self._dep_stack.append(
6537                                                 Dependency(atom=atom, root=root, parent=arg))
6538                         if self._ignored_deps:
6539                                 self._dep_stack.extend(self._ignored_deps)
6540                                 self._ignored_deps = []
6541                         if not self._create_graph(allow_unsatisfied=True):
6542                                 return 0
6543                         # Check the unsatisfied deps to see if any initially satisfied deps
6544                         # will become unsatisfied due to an upgrade. Initially unsatisfied
6545                         # deps are irrelevant since we only want to avoid breaking deps
6546                         # that are initially satisfied.
6547                         while self._unsatisfied_deps:
6548                                 dep = self._unsatisfied_deps.pop()
6549                                 matches = vardb.match_pkgs(dep.atom)
6550                                 if not matches:
6551                                         self._initially_unsatisfied_deps.append(dep)
6552                                         continue
6553                                 # An scheduled installation broke a deep dependency.
6554                                 # Add the installed package to the graph so that it
6555                                 # will be appropriately reported as a slot collision
6556                                 # (possibly solvable via backtracking).
6557                                 pkg = matches[-1] # highest match
6558                                 if not self._add_pkg(pkg, dep):
6559                                         return 0
6560                                 if not self._create_graph(allow_unsatisfied=True):
6561                                         return 0
6562                 return 1
6563
6564         def _pkg(self, cpv, type_name, root_config, installed=False):
6565                 """
6566                 Get a package instance from the cache, or create a new
6567                 one if necessary. Raises KeyError from aux_get if it
6568                 failures for some reason (package does not exist or is
6569                 corrupt).
6570                 """
6571                 operation = "merge"
6572                 if installed:
6573                         operation = "nomerge"
6574                 pkg = self._pkg_cache.get(
6575                         (type_name, root_config.root, cpv, operation))
6576                 if pkg is None:
6577                         tree_type = self.pkg_tree_map[type_name]
6578                         db = root_config.trees[tree_type].dbapi
6579                         db_keys = list(self._trees_orig[root_config.root][
6580                                 tree_type].dbapi._aux_cache_keys)
6581                         try:
6582                                 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
6583                         except KeyError:
6584                                 raise portage.exception.PackageNotFound(cpv)
6585                         pkg = Package(cpv=cpv, metadata=metadata,
6586                                 root_config=root_config, installed=installed)
6587                         if type_name == "ebuild":
6588                                 settings = self.pkgsettings[root_config.root]
6589                                 settings.setcpv(pkg)
6590                                 pkg.metadata["USE"] = settings["PORTAGE_USE"]
6591                                 pkg.metadata['CHOST'] = settings.get('CHOST', '')
6592                         self._pkg_cache[pkg] = pkg
6593                 return pkg
6594
6595         def validate_blockers(self):
6596                 """Remove any blockers from the digraph that do not match any of the
6597                 packages within the graph.  If necessary, create hard deps to ensure
6598                 correct merge order such that mutually blocking packages are never
6599                 installed simultaneously."""
6600
6601                 if "--buildpkgonly" in self.myopts or \
6602                         "--nodeps" in self.myopts:
6603                         return True
6604
6605                 #if "deep" in self.myparams:
6606                 if True:
6607                         # Pull in blockers from all installed packages that haven't already
6608                         # been pulled into the depgraph.  This is not enabled by default
6609                         # due to the performance penalty that is incurred by all the
6610                         # additional dep_check calls that are required.
6611
6612                         dep_keys = ["DEPEND","RDEPEND","PDEPEND"]
6613                         for myroot in self.trees:
6614                                 vardb = self.trees[myroot]["vartree"].dbapi
6615                                 portdb = self.trees[myroot]["porttree"].dbapi
6616                                 pkgsettings = self.pkgsettings[myroot]
6617                                 final_db = self.mydbapi[myroot]
6618
6619                                 blocker_cache = BlockerCache(myroot, vardb)
6620                                 stale_cache = set(blocker_cache)
6621                                 for pkg in vardb:
6622                                         cpv = pkg.cpv
6623                                         stale_cache.discard(cpv)
6624                                         pkg_in_graph = self.digraph.contains(pkg)
6625
6626                                         # Check for masked installed packages. Only warn about
6627                                         # packages that are in the graph in order to avoid warning
6628                                         # about those that will be automatically uninstalled during
6629                                         # the merge process or by --depclean.
6630                                         if pkg in final_db:
6631                                                 if pkg_in_graph and not visible(pkgsettings, pkg):
6632                                                         self._masked_installed.add(pkg)
6633
6634                                         blocker_atoms = None
6635                                         blockers = None
6636                                         if pkg_in_graph:
6637                                                 blockers = []
6638                                                 try:
6639                                                         blockers.extend(
6640                                                                 self._blocker_parents.child_nodes(pkg))
6641                                                 except KeyError:
6642                                                         pass
6643                                                 try:
6644                                                         blockers.extend(
6645                                                                 self._irrelevant_blockers.child_nodes(pkg))
6646                                                 except KeyError:
6647                                                         pass
6648                                         if blockers is not None:
6649                                                 blockers = set(str(blocker.atom) \
6650                                                         for blocker in blockers)
6651
6652                                         # If this node has any blockers, create a "nomerge"
6653                                         # node for it so that they can be enforced.
6654                                         self.spinner.update()
6655                                         blocker_data = blocker_cache.get(cpv)
6656                                         if blocker_data is not None and \
6657                                                 blocker_data.counter != long(pkg.metadata["COUNTER"]):
6658                                                 blocker_data = None
6659
6660                                         # If blocker data from the graph is available, use
6661                                         # it to validate the cache and update the cache if
6662                                         # it seems invalid.
6663                                         if blocker_data is not None and \
6664                                                 blockers is not None:
6665                                                 if not blockers.symmetric_difference(
6666                                                         blocker_data.atoms):
6667                                                         continue
6668                                                 blocker_data = None
6669
6670                                         if blocker_data is None and \
6671                                                 blockers is not None:
6672                                                 # Re-use the blockers from the graph.
6673                                                 blocker_atoms = sorted(blockers)
6674                                                 counter = long(pkg.metadata["COUNTER"])
6675                                                 blocker_data = \
6676                                                         blocker_cache.BlockerData(counter, blocker_atoms)
6677                                                 blocker_cache[pkg.cpv] = blocker_data
6678                                                 continue
6679
6680                                         if blocker_data:
6681                                                 blocker_atoms = blocker_data.atoms
6682                                         else:
6683                                                 # Use aux_get() to trigger FakeVartree global
6684                                                 # updates on *DEPEND when appropriate.
6685                                                 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
6686                                                 # It is crucial to pass in final_db here in order to
6687                                                 # optimize dep_check calls by eliminating atoms via
6688                                                 # dep_wordreduce and dep_eval calls.
6689                                                 try:
6690                                                         portage.dep._dep_check_strict = False
6691                                                         try:
6692                                                                 success, atoms = portage.dep_check(depstr,
6693                                                                         final_db, pkgsettings, myuse=pkg.use.enabled,
6694                                                                         trees=self._graph_trees, myroot=myroot)
6695                                                         except Exception, e:
6696                                                                 if isinstance(e, SystemExit):
6697                                                                         raise
6698                                                                 # This is helpful, for example, if a ValueError
6699                                                                 # is thrown from cpv_expand due to multiple
6700                                                                 # matches (this can happen if an atom lacks a
6701                                                                 # category).
6702                                                                 show_invalid_depstring_notice(
6703                                                                         pkg, depstr, str(e))
6704                                                                 del e
6705                                                                 raise
6706                                                 finally:
6707                                                         portage.dep._dep_check_strict = True
6708                                                 if not success:
6709                                                         replacement_pkg = final_db.match_pkgs(pkg.slot_atom)
6710                                                         if replacement_pkg and \
6711                                                                 replacement_pkg[0].operation == "merge":
6712                                                                 # This package is being replaced anyway, so
6713                                                                 # ignore invalid dependencies so as not to
6714                                                                 # annoy the user too much (otherwise they'd be
6715                                                                 # forced to manually unmerge it first).
6716                                                                 continue
6717                                                         show_invalid_depstring_notice(pkg, depstr, atoms)
6718                                                         return False
6719                                                 blocker_atoms = [myatom for myatom in atoms \
6720                                                         if myatom.startswith("!")]
6721                                                 blocker_atoms.sort()
6722                                                 counter = long(pkg.metadata["COUNTER"])
6723                                                 blocker_cache[cpv] = \
6724                                                         blocker_cache.BlockerData(counter, blocker_atoms)
6725                                         if blocker_atoms:
6726                                                 try:
6727                                                         for atom in blocker_atoms:
6728                                                                 blocker = Blocker(atom=portage.dep.Atom(atom),
6729                                                                         eapi=pkg.metadata["EAPI"], root=myroot)
6730                                                                 self._blocker_parents.add(blocker, pkg)
6731                                                 except portage.exception.InvalidAtom, e:
6732                                                         depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
6733                                                         show_invalid_depstring_notice(
6734                                                                 pkg, depstr, "Invalid Atom: %s" % (e,))
6735                                                         return False
6736                                 for cpv in stale_cache:
6737                                         del blocker_cache[cpv]
6738                                 blocker_cache.flush()
6739                                 del blocker_cache
6740
6741                 # Discard any "uninstall" tasks scheduled by previous calls
6742                 # to this method, since those tasks may not make sense given
6743                 # the current graph state.
6744                 previous_uninstall_tasks = self._blocker_uninstalls.leaf_nodes()
6745                 if previous_uninstall_tasks:
6746                         self._blocker_uninstalls = digraph()
6747                         self.digraph.difference_update(previous_uninstall_tasks)
6748
6749                 for blocker in self._blocker_parents.leaf_nodes():
6750                         self.spinner.update()
6751                         root_config = self.roots[blocker.root]
6752                         virtuals = root_config.settings.getvirtuals()
6753                         myroot = blocker.root
6754                         initial_db = self.trees[myroot]["vartree"].dbapi
6755                         final_db = self.mydbapi[myroot]
6756                         
6757                         provider_virtual = False
6758                         if blocker.cp in virtuals and \
6759                                 not self._have_new_virt(blocker.root, blocker.cp):
6760                                 provider_virtual = True
6761
6762                         # Use this to check PROVIDE for each matched package
6763                         # when necessary.
6764                         atom_set = InternalPackageSet(
6765                                 initial_atoms=[blocker.atom])
6766
6767                         if provider_virtual:
6768                                 atoms = []
6769                                 for provider_entry in virtuals[blocker.cp]:
6770                                         provider_cp = \
6771                                                 portage.dep_getkey(provider_entry)
6772                                         atoms.append(blocker.atom.replace(
6773                                                 blocker.cp, provider_cp))
6774                         else:
6775                                 atoms = [blocker.atom]
6776
6777                         blocked_initial = set()
6778                         for atom in atoms:
6779                                 for pkg in initial_db.match_pkgs(atom):
6780                                         if atom_set.findAtomForPackage(pkg):
6781                                                 blocked_initial.add(pkg)
6782
6783                         blocked_final = set()
6784                         for atom in atoms:
6785                                 for pkg in final_db.match_pkgs(atom):
6786                                         if atom_set.findAtomForPackage(pkg):
6787                                                 blocked_final.add(pkg)
6788
6789                         if not blocked_initial and not blocked_final:
6790                                 parent_pkgs = self._blocker_parents.parent_nodes(blocker)
6791                                 self._blocker_parents.remove(blocker)
6792                                 # Discard any parents that don't have any more blockers.
6793                                 for pkg in parent_pkgs:
6794                                         self._irrelevant_blockers.add(blocker, pkg)
6795                                         if not self._blocker_parents.child_nodes(pkg):
6796                                                 self._blocker_parents.remove(pkg)
6797                                 continue
6798                         for parent in self._blocker_parents.parent_nodes(blocker):
6799                                 unresolved_blocks = False
6800                                 depends_on_order = set()
6801                                 for pkg in blocked_initial:
6802                                         if pkg.slot_atom == parent.slot_atom:
6803                                                 # TODO: Support blocks within slots in cases where it
6804                                                 # might make sense.  For example, a new version might
6805                                                 # require that the old version be uninstalled at build
6806                                                 # time.
6807                                                 continue
6808                                         if parent.installed:
6809                                                 # Two currently installed packages conflict with
6810                                                 # eachother. Ignore this case since the damage
6811                                                 # is already done and this would be likely to
6812                                                 # confuse users if displayed like a normal blocker.
6813                                                 continue
6814
6815                                         self._blocked_pkgs.add(pkg, blocker)
6816
6817                                         if parent.operation == "merge":
6818                                                 # Maybe the blocked package can be replaced or simply
6819                                                 # unmerged to resolve this block.
6820                                                 depends_on_order.add((pkg, parent))
6821                                                 continue
6822                                         # None of the above blocker resolutions techniques apply,
6823                                         # so apparently this one is unresolvable.
6824                                         unresolved_blocks = True
6825                                 for pkg in blocked_final:
6826                                         if pkg.slot_atom == parent.slot_atom:
6827                                                 # TODO: Support blocks within slots.
6828                                                 continue
6829                                         if parent.operation == "nomerge" and \
6830                                                 pkg.operation == "nomerge":
6831                                                 # This blocker will be handled the next time that a
6832                                                 # merge of either package is triggered.
6833                                                 continue
6834
6835                                         self._blocked_pkgs.add(pkg, blocker)
6836
6837                                         # Maybe the blocking package can be
6838                                         # unmerged to resolve this block.
6839                                         if parent.operation == "merge" and pkg.installed:
6840                                                 depends_on_order.add((pkg, parent))
6841                                                 continue
6842                                         elif parent.operation == "nomerge":
6843                                                 depends_on_order.add((parent, pkg))
6844                                                 continue
6845                                         # None of the above blocker resolutions techniques apply,
6846                                         # so apparently this one is unresolvable.
6847                                         unresolved_blocks = True
6848
6849                                 # Make sure we don't unmerge any package that have been pulled
6850                                 # into the graph.
6851                                 if not unresolved_blocks and depends_on_order:
6852                                         for inst_pkg, inst_task in depends_on_order:
6853                                                 if self.digraph.contains(inst_pkg) and \
6854                                                         self.digraph.parent_nodes(inst_pkg):
6855                                                         unresolved_blocks = True
6856                                                         break
6857
6858                                 if not unresolved_blocks and depends_on_order:
6859                                         for inst_pkg, inst_task in depends_on_order:
6860                                                 uninst_task = Package(built=inst_pkg.built,
6861                                                         cpv=inst_pkg.cpv, installed=inst_pkg.installed,
6862                                                         metadata=inst_pkg.metadata,
6863                                                         operation="uninstall",
6864                                                         root_config=inst_pkg.root_config,
6865                                                         type_name=inst_pkg.type_name)
6866                                                 self._pkg_cache[uninst_task] = uninst_task
6867                                                 # Enforce correct merge order with a hard dep.
6868                                                 self.digraph.addnode(uninst_task, inst_task,
6869                                                         priority=BlockerDepPriority.instance)
6870                                                 # Count references to this blocker so that it can be
6871                                                 # invalidated after nodes referencing it have been
6872                                                 # merged.
6873                                                 self._blocker_uninstalls.addnode(uninst_task, blocker)
6874                                 if not unresolved_blocks and not depends_on_order:
6875                                         self._irrelevant_blockers.add(blocker, parent)
6876                                         self._blocker_parents.remove_edge(blocker, parent)
6877                                         if not self._blocker_parents.parent_nodes(blocker):
6878                                                 self._blocker_parents.remove(blocker)
6879                                         if not self._blocker_parents.child_nodes(parent):
6880                                                 self._blocker_parents.remove(parent)
6881                                 if unresolved_blocks:
6882                                         self._unsolvable_blockers.add(blocker, parent)
6883
6884                 return True
6885
6886         def _accept_blocker_conflicts(self):
6887                 acceptable = False
6888                 for x in ("--buildpkgonly", "--fetchonly",
6889                         "--fetch-all-uri", "--nodeps"):
6890                         if x in self.myopts:
6891                                 acceptable = True
6892                                 break
6893                 return acceptable
6894
6895         def _merge_order_bias(self, mygraph):
6896                 """
6897                 For optimal leaf node selection, promote deep system runtime deps and
6898                 order nodes from highest to lowest overall reference count.
6899                 """
6900
6901                 node_info = {}
6902                 for node in mygraph.order:
6903                         node_info[node] = len(mygraph.parent_nodes(node))
6904                 deep_system_deps = _find_deep_system_runtime_deps(mygraph)
6905
6906                 def cmp_merge_preference(node1, node2):
6907
6908                         if node1.operation == 'uninstall':
6909                                 if node2.operation == 'uninstall':
6910                                         return 0
6911                                 return 1
6912
6913                         if node2.operation == 'uninstall':
6914                                 if node1.operation == 'uninstall':
6915                                         return 0
6916                                 return -1
6917
6918                         node1_sys = node1 in deep_system_deps
6919                         node2_sys = node2 in deep_system_deps
6920                         if node1_sys != node2_sys:
6921                                 if node1_sys:
6922                                         return -1
6923                                 return 1
6924
6925                         return node_info[node2] - node_info[node1]
6926
6927                 mygraph.order.sort(key=cmp_sort_key(cmp_merge_preference))
6928
6929         def altlist(self, reversed=False):
6930
6931                 while self._serialized_tasks_cache is None:
6932                         self._resolve_conflicts()
6933                         try:
6934                                 self._serialized_tasks_cache, self._scheduler_graph = \
6935                                         self._serialize_tasks()
6936                         except self._serialize_tasks_retry:
6937                                 pass
6938
6939                 retlist = self._serialized_tasks_cache[:]
6940                 if reversed:
6941                         retlist.reverse()
6942                 return retlist
6943
6944         def schedulerGraph(self):
6945                 """
6946                 The scheduler graph is identical to the normal one except that
6947                 uninstall edges are reversed in specific cases that require
6948                 conflicting packages to be temporarily installed simultaneously.
6949                 This is intended for use by the Scheduler in it's parallelization
6950                 logic. It ensures that temporary simultaneous installation of
6951                 conflicting packages is avoided when appropriate (especially for
6952                 !!atom blockers), but allowed in specific cases that require it.
6953
6954                 Note that this method calls break_refs() which alters the state of
6955                 internal Package instances such that this depgraph instance should
6956                 not be used to perform any more calculations.
6957                 """
6958                 if self._scheduler_graph is None:
6959                         self.altlist()
6960                 self.break_refs(self._scheduler_graph.order)
6961                 return self._scheduler_graph
6962
6963         def break_refs(self, nodes):
6964                 """
6965                 Take a mergelist like that returned from self.altlist() and
6966                 break any references that lead back to the depgraph. This is
6967                 useful if you want to hold references to packages without
6968                 also holding the depgraph on the heap.
6969                 """
6970                 for node in nodes:
6971                         if hasattr(node, "root_config"):
6972                                 # The FakeVartree references the _package_cache which
6973                                 # references the depgraph. So that Package instances don't
6974                                 # hold the depgraph and FakeVartree on the heap, replace
6975                                 # the RootConfig that references the FakeVartree with the
6976                                 # original RootConfig instance which references the actual
6977                                 # vartree.
6978                                 node.root_config = \
6979                                         self._trees_orig[node.root_config.root]["root_config"]
6980
6981         def _resolve_conflicts(self):
6982                 if not self._complete_graph():
6983                         raise self._unknown_internal_error()
6984
6985                 if not self.validate_blockers():
6986                         raise self._unknown_internal_error()
6987
6988                 if self._slot_collision_info:
6989                         self._process_slot_conflicts()
6990
6991         def _serialize_tasks(self):
6992
6993                 if "--debug" in self.myopts:
6994                         writemsg("\ndigraph:\n\n", noiselevel=-1)
6995                         self.digraph.debug_print()
6996                         writemsg("\n", noiselevel=-1)
6997
6998                 scheduler_graph = self.digraph.copy()
6999                 mygraph=self.digraph.copy()
7000                 # Prune "nomerge" root nodes if nothing depends on them, since
7001                 # otherwise they slow down merge order calculation. Don't remove
7002                 # non-root nodes since they help optimize merge order in some cases
7003                 # such as revdep-rebuild.
7004                 removed_nodes = set()
7005                 while True:
7006                         for node in mygraph.root_nodes():
7007                                 if not isinstance(node, Package) or \
7008                                         node.installed or node.onlydeps:
7009                                         removed_nodes.add(node)
7010                         if removed_nodes:
7011                                 self.spinner.update()
7012                                 mygraph.difference_update(removed_nodes)
7013                         if not removed_nodes:
7014                                 break
7015                         removed_nodes.clear()
7016                 self._merge_order_bias(mygraph)
7017                 def cmp_circular_bias(n1, n2):
7018                         """
7019                         RDEPEND is stronger than PDEPEND and this function
7020                         measures such a strength bias within a circular
7021                         dependency relationship.
7022                         """
7023                         n1_n2_medium = n2 in mygraph.child_nodes(n1,
7024                                 ignore_priority=priority_range.ignore_medium_soft)
7025                         n2_n1_medium = n1 in mygraph.child_nodes(n2,
7026                                 ignore_priority=priority_range.ignore_medium_soft)
7027                         if n1_n2_medium == n2_n1_medium:
7028                                 return 0
7029                         elif n1_n2_medium:
7030                                 return 1
7031                         return -1
7032                 myblocker_uninstalls = self._blocker_uninstalls.copy()
7033                 retlist=[]
7034                 # Contains uninstall tasks that have been scheduled to
7035                 # occur after overlapping blockers have been installed.
7036                 scheduled_uninstalls = set()
7037                 # Contains any Uninstall tasks that have been ignored
7038                 # in order to avoid the circular deps code path. These
7039                 # correspond to blocker conflicts that could not be
7040                 # resolved.
7041                 ignored_uninstall_tasks = set()
7042                 have_uninstall_task = False
7043                 complete = "complete" in self.myparams
7044                 asap_nodes = []
7045
7046                 def get_nodes(**kwargs):
7047                         """
7048                         Returns leaf nodes excluding Uninstall instances
7049                         since those should be executed as late as possible.
7050                         """
7051                         return [node for node in mygraph.leaf_nodes(**kwargs) \
7052                                 if isinstance(node, Package) and \
7053                                         (node.operation != "uninstall" or \
7054                                         node in scheduled_uninstalls)]
7055
7056                 # sys-apps/portage needs special treatment if ROOT="/"
7057                 running_root = self._running_root.root
7058                 from portage.const import PORTAGE_PACKAGE_ATOM
7059                 runtime_deps = InternalPackageSet(
7060                         initial_atoms=[PORTAGE_PACKAGE_ATOM])
7061                 running_portage = self.trees[running_root]["vartree"].dbapi.match_pkgs(
7062                         PORTAGE_PACKAGE_ATOM)
7063                 replacement_portage = self.mydbapi[running_root].match_pkgs(
7064                         PORTAGE_PACKAGE_ATOM)
7065
7066                 if running_portage:
7067                         running_portage = running_portage[0]
7068                 else:
7069                         running_portage = None
7070
7071                 if replacement_portage:
7072                         replacement_portage = replacement_portage[0]
7073                 else:
7074                         replacement_portage = None
7075
7076                 if replacement_portage == running_portage:
7077                         replacement_portage = None
7078
7079                 if replacement_portage is not None:
7080                         # update from running_portage to replacement_portage asap
7081                         asap_nodes.append(replacement_portage)
7082
7083                 if running_portage is not None:
7084                         try:
7085                                 portage_rdepend = self._select_atoms_highest_available(
7086                                         running_root, running_portage.metadata["RDEPEND"],
7087                                         myuse=running_portage.use.enabled,
7088                                         parent=running_portage, strict=False)
7089                         except portage.exception.InvalidDependString, e:
7090                                 portage.writemsg("!!! Invalid RDEPEND in " + \
7091                                         "'%svar/db/pkg/%s/RDEPEND': %s\n" % \
7092                                         (running_root, running_portage.cpv, e), noiselevel=-1)
7093                                 del e
7094                                 portage_rdepend = []
7095                         runtime_deps.update(atom for atom in portage_rdepend \
7096                                 if not atom.startswith("!"))
7097
7098                 def gather_deps(ignore_priority, mergeable_nodes,
7099                         selected_nodes, node):
7100                         """
7101                         Recursively gather a group of nodes that RDEPEND on
7102                         eachother. This ensures that they are merged as a group
7103                         and get their RDEPENDs satisfied as soon as possible.
7104                         """
7105                         if node in selected_nodes:
7106                                 return True
7107                         if node not in mergeable_nodes:
7108                                 return False
7109                         if node == replacement_portage and \
7110                                 mygraph.child_nodes(node,
7111                                 ignore_priority=priority_range.ignore_medium_soft):
7112                                 # Make sure that portage always has all of it's
7113                                 # RDEPENDs installed first.
7114                                 return False
7115                         selected_nodes.add(node)
7116                         for child in mygraph.child_nodes(node,
7117                                 ignore_priority=ignore_priority):
7118                                 if not gather_deps(ignore_priority,
7119                                         mergeable_nodes, selected_nodes, child):
7120                                         return False
7121                         return True
7122
7123                 def ignore_uninst_or_med(priority):
7124                         if priority is BlockerDepPriority.instance:
7125                                 return True
7126                         return priority_range.ignore_medium(priority)
7127
7128                 def ignore_uninst_or_med_soft(priority):
7129                         if priority is BlockerDepPriority.instance:
7130                                 return True
7131                         return priority_range.ignore_medium_soft(priority)
7132
7133                 tree_mode = "--tree" in self.myopts
7134                 # Tracks whether or not the current iteration should prefer asap_nodes
7135                 # if available.  This is set to False when the previous iteration
7136                 # failed to select any nodes.  It is reset whenever nodes are
7137                 # successfully selected.
7138                 prefer_asap = True
7139
7140                 # Controls whether or not the current iteration should drop edges that
7141                 # are "satisfied" by installed packages, in order to solve circular
7142                 # dependencies. The deep runtime dependencies of installed packages are
7143                 # not checked in this case (bug #199856), so it must be avoided
7144                 # whenever possible.
7145                 drop_satisfied = False
7146
7147                 # State of variables for successive iterations that loosen the
7148                 # criteria for node selection.
7149                 #
7150                 # iteration   prefer_asap   drop_satisfied
7151                 # 1           True          False
7152                 # 2           False         False
7153                 # 3           False         True
7154                 #
7155                 # If no nodes are selected on the last iteration, it is due to
7156                 # unresolved blockers or circular dependencies.
7157
7158                 while not mygraph.empty():
7159                         self.spinner.update()
7160                         selected_nodes = None
7161                         ignore_priority = None
7162                         if drop_satisfied or (prefer_asap and asap_nodes):
7163                                 priority_range = DepPrioritySatisfiedRange
7164                         else:
7165                                 priority_range = DepPriorityNormalRange
7166                         if prefer_asap and asap_nodes:
7167                                 # ASAP nodes are merged before their soft deps. Go ahead and
7168                                 # select root nodes here if necessary, since it's typical for
7169                                 # the parent to have been removed from the graph already.
7170                                 asap_nodes = [node for node in asap_nodes \
7171                                         if mygraph.contains(node)]
7172                                 for node in asap_nodes:
7173                                         if not mygraph.child_nodes(node,
7174                                                 ignore_priority=priority_range.ignore_soft):
7175                                                 selected_nodes = [node]
7176                                                 asap_nodes.remove(node)
7177                                                 break
7178                         if not selected_nodes and \
7179                                 not (prefer_asap and asap_nodes):
7180                                 for i in xrange(priority_range.NONE,
7181                                         priority_range.MEDIUM_SOFT + 1):
7182                                         ignore_priority = priority_range.ignore_priority[i]
7183                                         nodes = get_nodes(ignore_priority=ignore_priority)
7184                                         if nodes:
7185                                                 # If there is a mix of uninstall nodes with other
7186                                                 # types, save the uninstall nodes for later since
7187                                                 # sometimes a merge node will render an uninstall
7188                                                 # node unnecessary (due to occupying the same slot),
7189                                                 # and we want to avoid executing a separate uninstall
7190                                                 # task in that case.
7191                                                 if len(nodes) > 1:
7192                                                         good_uninstalls = []
7193                                                         with_some_uninstalls_excluded = []
7194                                                         for node in nodes:
7195                                                                 if node.operation == "uninstall":
7196                                                                         slot_node = self.mydbapi[node.root
7197                                                                                 ].match_pkgs(node.slot_atom)
7198                                                                         if slot_node and \
7199                                                                                 slot_node[0].operation == "merge":
7200                                                                                 continue
7201                                                                         good_uninstalls.append(node)
7202                                                                 with_some_uninstalls_excluded.append(node)
7203                                                         if good_uninstalls:
7204                                                                 nodes = good_uninstalls
7205                                                         elif with_some_uninstalls_excluded:
7206                                                                 nodes = with_some_uninstalls_excluded
7207                                                         else:
7208                                                                 nodes = nodes
7209
7210                                                 if ignore_priority is None and not tree_mode:
7211                                                         # Greedily pop all of these nodes since no
7212                                                         # relationship has been ignored. This optimization
7213                                                         # destroys --tree output, so it's disabled in tree
7214                                                         # mode.
7215                                                         selected_nodes = nodes
7216                                                 else:
7217                                                         # For optimal merge order:
7218                                                         #  * Only pop one node.
7219                                                         #  * Removing a root node (node without a parent)
7220                                                         #    will not produce a leaf node, so avoid it.
7221                                                         #  * It's normal for a selected uninstall to be a
7222                                                         #    root node, so don't check them for parents.
7223                                                         for node in nodes:
7224                                                                 if node.operation == "uninstall" or \
7225                                                                         mygraph.parent_nodes(node):
7226                                                                         selected_nodes = [node]
7227                                                                         break
7228
7229                                                 if selected_nodes:
7230                                                         break
7231
7232                         if not selected_nodes:
7233                                 nodes = get_nodes(ignore_priority=priority_range.ignore_medium)
7234                                 if nodes:
7235                                         mergeable_nodes = set(nodes)
7236                                         if prefer_asap and asap_nodes:
7237                                                 nodes = asap_nodes
7238                                         for i in xrange(priority_range.SOFT,
7239                                                 priority_range.MEDIUM_SOFT + 1):
7240                                                 ignore_priority = priority_range.ignore_priority[i]
7241                                                 for node in nodes:
7242                                                         if not mygraph.parent_nodes(node):
7243                                                                 continue
7244                                                         selected_nodes = set()
7245                                                         if gather_deps(ignore_priority,
7246                                                                 mergeable_nodes, selected_nodes, node):
7247                                                                 break
7248                                                         else:
7249                                                                 selected_nodes = None
7250                                                 if selected_nodes:
7251                                                         break
7252
7253                                         if prefer_asap and asap_nodes and not selected_nodes:
7254                                                 # We failed to find any asap nodes to merge, so ignore
7255                                                 # them for the next iteration.
7256                                                 prefer_asap = False
7257                                                 continue
7258
7259                         if selected_nodes and ignore_priority is not None:
7260                                 # Try to merge ignored medium_soft deps as soon as possible
7261                                 # if they're not satisfied by installed packages.
7262                                 for node in selected_nodes:
7263                                         children = set(mygraph.child_nodes(node))
7264                                         soft = children.difference(
7265                                                 mygraph.child_nodes(node,
7266                                                 ignore_priority=DepPrioritySatisfiedRange.ignore_soft))
7267                                         medium_soft = children.difference(
7268                                                 mygraph.child_nodes(node,
7269                                                         ignore_priority = \
7270                                                         DepPrioritySatisfiedRange.ignore_medium_soft))
7271                                         medium_soft.difference_update(soft)
7272                                         for child in medium_soft:
7273                                                 if child in selected_nodes:
7274                                                         continue
7275                                                 if child in asap_nodes:
7276                                                         continue
7277                                                 asap_nodes.append(child)
7278
7279                         if selected_nodes and len(selected_nodes) > 1:
7280                                 if not isinstance(selected_nodes, list):
7281                                         selected_nodes = list(selected_nodes)
7282                                 selected_nodes.sort(key=cmp_sort_key(cmp_circular_bias))
7283
7284                         if not selected_nodes and not myblocker_uninstalls.is_empty():
7285                                 # An Uninstall task needs to be executed in order to
7286                                 # avoid conflict if possible.
7287
7288                                 if drop_satisfied:
7289                                         priority_range = DepPrioritySatisfiedRange
7290                                 else:
7291                                         priority_range = DepPriorityNormalRange
7292
7293                                 mergeable_nodes = get_nodes(
7294                                         ignore_priority=ignore_uninst_or_med)
7295
7296                                 min_parent_deps = None
7297                                 uninst_task = None
7298                                 for task in myblocker_uninstalls.leaf_nodes():
7299                                         # Do some sanity checks so that system or world packages
7300                                         # don't get uninstalled inappropriately here (only really
7301                                         # necessary when --complete-graph has not been enabled).
7302
7303                                         if task in ignored_uninstall_tasks:
7304                                                 continue
7305
7306                                         if task in scheduled_uninstalls:
7307                                                 # It's been scheduled but it hasn't
7308                                                 # been executed yet due to dependence
7309                                                 # on installation of blocking packages.
7310                                                 continue
7311
7312                                         root_config = self.roots[task.root]
7313                                         inst_pkg = self._pkg_cache[
7314                                                 ("installed", task.root, task.cpv, "nomerge")]
7315
7316                                         if self.digraph.contains(inst_pkg):
7317                                                 continue
7318
7319                                         forbid_overlap = False
7320                                         heuristic_overlap = False
7321                                         for blocker in myblocker_uninstalls.parent_nodes(task):
7322                                                 if blocker.eapi in ("0", "1"):
7323                                                         heuristic_overlap = True
7324                                                 elif blocker.atom.blocker.overlap.forbid:
7325                                                         forbid_overlap = True
7326                                                         break
7327                                         if forbid_overlap and running_root == task.root:
7328                                                 continue
7329
7330                                         if heuristic_overlap and running_root == task.root:
7331                                                 # Never uninstall sys-apps/portage or it's essential
7332                                                 # dependencies, except through replacement.
7333                                                 try:
7334                                                         runtime_dep_atoms = \
7335                                                                 list(runtime_deps.iterAtomsForPackage(task))
7336                                                 except portage.exception.InvalidDependString, e:
7337                                                         portage.writemsg("!!! Invalid PROVIDE in " + \
7338                                                                 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
7339                                                                 (task.root, task.cpv, e), noiselevel=-1)
7340                                                         del e
7341                                                         continue
7342
7343                                                 # Don't uninstall a runtime dep if it appears
7344                                                 # to be the only suitable one installed.
7345                                                 skip = False
7346                                                 vardb = root_config.trees["vartree"].dbapi
7347                                                 for atom in runtime_dep_atoms:
7348                                                         other_version = None
7349                                                         for pkg in vardb.match_pkgs(atom):
7350                                                                 if pkg.cpv == task.cpv and \
7351                                                                         pkg.metadata["COUNTER"] == \
7352                                                                         task.metadata["COUNTER"]:
7353                                                                         continue
7354                                                                 other_version = pkg
7355                                                                 break
7356                                                         if other_version is None:
7357                                                                 skip = True
7358                                                                 break
7359                                                 if skip:
7360                                                         continue
7361
7362                                                 # For packages in the system set, don't take
7363                                                 # any chances. If the conflict can't be resolved
7364                                                 # by a normal replacement operation then abort.
7365                                                 skip = False
7366                                                 try:
7367                                                         for atom in root_config.sets[
7368                                                                 "system"].iterAtomsForPackage(task):
7369                                                                 skip = True
7370                                                                 break
7371                                                 except portage.exception.InvalidDependString, e:
7372                                                         portage.writemsg("!!! Invalid PROVIDE in " + \
7373                                                                 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
7374                                                                 (task.root, task.cpv, e), noiselevel=-1)
7375                                                         del e
7376                                                         skip = True
7377                                                 if skip:
7378                                                         continue
7379
7380                                         # Note that the world check isn't always
7381                                         # necessary since self._complete_graph() will
7382                                         # add all packages from the system and world sets to the
7383                                         # graph. This just allows unresolved conflicts to be
7384                                         # detected as early as possible, which makes it possible
7385                                         # to avoid calling self._complete_graph() when it is
7386                                         # unnecessary due to blockers triggering an abortion.
7387                                         if not complete:
7388                                                 # For packages in the world set, go ahead an uninstall
7389                                                 # when necessary, as long as the atom will be satisfied
7390                                                 # in the final state.
7391                                                 graph_db = self.mydbapi[task.root]
7392                                                 skip = False
7393                                                 try:
7394                                                         for atom in root_config.sets[
7395                                                                 "world"].iterAtomsForPackage(task):
7396                                                                 satisfied = False
7397                                                                 for pkg in graph_db.match_pkgs(atom):
7398                                                                         if pkg == inst_pkg:
7399                                                                                 continue
7400                                                                         satisfied = True
7401                                                                         break
7402                                                                 if not satisfied:
7403                                                                         skip = True
7404                                                                         self._blocked_world_pkgs[inst_pkg] = atom
7405                                                                         break
7406                                                 except portage.exception.InvalidDependString, e:
7407                                                         portage.writemsg("!!! Invalid PROVIDE in " + \
7408                                                                 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
7409                                                                 (task.root, task.cpv, e), noiselevel=-1)
7410                                                         del e
7411                                                         skip = True
7412                                                 if skip:
7413                                                         continue
7414
7415                                         # Check the deps of parent nodes to ensure that
7416                                         # the chosen task produces a leaf node. Maybe
7417                                         # this can be optimized some more to make the
7418                                         # best possible choice, but the current algorithm
7419                                         # is simple and should be near optimal for most
7420                                         # common cases.
7421                                         mergeable_parent = False
7422                                         parent_deps = set()
7423                                         for parent in mygraph.parent_nodes(task):
7424                                                 parent_deps.update(mygraph.child_nodes(parent,
7425                                                         ignore_priority=priority_range.ignore_medium_soft))
7426                                                 if parent in mergeable_nodes and \
7427                                                         gather_deps(ignore_uninst_or_med_soft,
7428                                                         mergeable_nodes, set(), parent):
7429                                                         mergeable_parent = True
7430
7431                                         if not mergeable_parent:
7432                                                 continue
7433
7434                                         parent_deps.remove(task)
7435                                         if min_parent_deps is None or \
7436                                                 len(parent_deps) < min_parent_deps:
7437                                                 min_parent_deps = len(parent_deps)
7438                                                 uninst_task = task
7439
7440                                 if uninst_task is not None:
7441                                         # The uninstall is performed only after blocking
7442                                         # packages have been merged on top of it. File
7443                                         # collisions between blocking packages are detected
7444                                         # and removed from the list of files to be uninstalled.
7445                                         scheduled_uninstalls.add(uninst_task)
7446                                         parent_nodes = mygraph.parent_nodes(uninst_task)
7447
7448                                         # Reverse the parent -> uninstall edges since we want
7449                                         # to do the uninstall after blocking packages have
7450                                         # been merged on top of it.
7451                                         mygraph.remove(uninst_task)
7452                                         for blocked_pkg in parent_nodes:
7453                                                 mygraph.add(blocked_pkg, uninst_task,
7454                                                         priority=BlockerDepPriority.instance)
7455                                                 scheduler_graph.remove_edge(uninst_task, blocked_pkg)
7456                                                 scheduler_graph.add(blocked_pkg, uninst_task,
7457                                                         priority=BlockerDepPriority.instance)
7458
7459                                         # Reset the state variables for leaf node selection and
7460                                         # continue trying to select leaf nodes.
7461                                         prefer_asap = True
7462                                         drop_satisfied = False
7463                                         continue
7464
7465                         if not selected_nodes:
7466                                 # Only select root nodes as a last resort. This case should
7467                                 # only trigger when the graph is nearly empty and the only
7468                                 # remaining nodes are isolated (no parents or children). Since
7469                                 # the nodes must be isolated, ignore_priority is not needed.
7470                                 selected_nodes = get_nodes()
7471
7472                         if not selected_nodes and not drop_satisfied:
7473                                 drop_satisfied = True
7474                                 continue
7475
7476                         if not selected_nodes and not myblocker_uninstalls.is_empty():
7477                                 # If possible, drop an uninstall task here in order to avoid
7478                                 # the circular deps code path. The corresponding blocker will
7479                                 # still be counted as an unresolved conflict.
7480                                 uninst_task = None
7481                                 for node in myblocker_uninstalls.leaf_nodes():
7482                                         try:
7483                                                 mygraph.remove(node)
7484                                         except KeyError:
7485                                                 pass
7486                                         else:
7487                                                 uninst_task = node
7488                                                 ignored_uninstall_tasks.add(node)
7489                                                 break
7490
7491                                 if uninst_task is not None:
7492                                         # Reset the state variables for leaf node selection and
7493                                         # continue trying to select leaf nodes.
7494                                         prefer_asap = True
7495                                         drop_satisfied = False
7496                                         continue
7497
7498                         if not selected_nodes:
7499                                 self._circular_deps_for_display = mygraph
7500                                 raise self._unknown_internal_error()
7501
7502                         # At this point, we've succeeded in selecting one or more nodes, so
7503                         # reset state variables for leaf node selection.
7504                         prefer_asap = True
7505                         drop_satisfied = False
7506
7507                         mygraph.difference_update(selected_nodes)
7508
7509                         for node in selected_nodes:
7510                                 if isinstance(node, Package) and \
7511                                         node.operation == "nomerge":
7512                                         continue
7513
7514                                 # Handle interactions between blockers
7515                                 # and uninstallation tasks.
7516                                 solved_blockers = set()
7517                                 uninst_task = None
7518                                 if isinstance(node, Package) and \
7519                                         "uninstall" == node.operation:
7520                                         have_uninstall_task = True
7521                                         uninst_task = node
7522                                 else:
7523                                         vardb = self.trees[node.root]["vartree"].dbapi
7524                                         previous_cpv = vardb.match(node.slot_atom)
7525                                         if previous_cpv:
7526                                                 # The package will be replaced by this one, so remove
7527                                                 # the corresponding Uninstall task if necessary.
7528                                                 previous_cpv = previous_cpv[0]
7529                                                 uninst_task = \
7530                                                         ("installed", node.root, previous_cpv, "uninstall")
7531                                                 try:
7532                                                         mygraph.remove(uninst_task)
7533                                                 except KeyError:
7534                                                         pass
7535
7536                                 if uninst_task is not None and \
7537                                         uninst_task not in ignored_uninstall_tasks and \
7538                                         myblocker_uninstalls.contains(uninst_task):
7539                                         blocker_nodes = myblocker_uninstalls.parent_nodes(uninst_task)
7540                                         myblocker_uninstalls.remove(uninst_task)
7541                                         # Discard any blockers that this Uninstall solves.
7542                                         for blocker in blocker_nodes:
7543                                                 if not myblocker_uninstalls.child_nodes(blocker):
7544                                                         myblocker_uninstalls.remove(blocker)
7545                                                         solved_blockers.add(blocker)
7546
7547                                 retlist.append(node)
7548
7549                                 if (isinstance(node, Package) and \
7550                                         "uninstall" == node.operation) or \
7551                                         (uninst_task is not None and \
7552                                         uninst_task in scheduled_uninstalls):
7553                                         # Include satisfied blockers in the merge list
7554                                         # since the user might be interested and also
7555                                         # it serves as an indicator that blocking packages
7556                                         # will be temporarily installed simultaneously.
7557                                         for blocker in solved_blockers:
7558                                                 retlist.append(Blocker(atom=blocker.atom,
7559                                                         root=blocker.root, eapi=blocker.eapi,
7560                                                         satisfied=True))
7561
7562                 unsolvable_blockers = set(self._unsolvable_blockers.leaf_nodes())
7563                 for node in myblocker_uninstalls.root_nodes():
7564                         unsolvable_blockers.add(node)
7565
7566                 for blocker in unsolvable_blockers:
7567                         retlist.append(blocker)
7568
7569                 # If any Uninstall tasks need to be executed in order
7570                 # to avoid a conflict, complete the graph with any
7571                 # dependencies that may have been initially
7572                 # neglected (to ensure that unsafe Uninstall tasks
7573                 # are properly identified and blocked from execution).
7574                 if have_uninstall_task and \
7575                         not complete and \
7576                         not unsolvable_blockers:
7577                         self.myparams.add("complete")
7578                         raise self._serialize_tasks_retry("")
7579
7580                 if unsolvable_blockers and \
7581                         not self._accept_blocker_conflicts():
7582                         self._unsatisfied_blockers_for_display = unsolvable_blockers
7583                         self._serialized_tasks_cache = retlist[:]
7584                         self._scheduler_graph = scheduler_graph
7585                         raise self._unknown_internal_error()
7586
7587                 if self._slot_collision_info and \
7588                         not self._accept_blocker_conflicts():
7589                         self._serialized_tasks_cache = retlist[:]
7590                         self._scheduler_graph = scheduler_graph
7591                         raise self._unknown_internal_error()
7592
7593                 return retlist, scheduler_graph
7594
7595         def _show_circular_deps(self, mygraph):
7596                 # No leaf nodes are available, so we have a circular
7597                 # dependency panic situation.  Reduce the noise level to a
7598                 # minimum via repeated elimination of root nodes since they
7599                 # have no parents and thus can not be part of a cycle.
7600                 while True:
7601                         root_nodes = mygraph.root_nodes(
7602                                 ignore_priority=DepPrioritySatisfiedRange.ignore_medium_soft)
7603                         if not root_nodes:
7604                                 break
7605                         mygraph.difference_update(root_nodes)
7606                 # Display the USE flags that are enabled on nodes that are part
7607                 # of dependency cycles in case that helps the user decide to
7608                 # disable some of them.
7609                 display_order = []
7610                 tempgraph = mygraph.copy()
7611                 while not tempgraph.empty():
7612                         nodes = tempgraph.leaf_nodes()
7613                         if not nodes:
7614                                 node = tempgraph.order[0]
7615                         else:
7616                                 node = nodes[0]
7617                         display_order.append(node)
7618                         tempgraph.remove(node)
7619                 display_order.reverse()
7620                 self.myopts.pop("--quiet", None)
7621                 self.myopts.pop("--verbose", None)
7622                 self.myopts["--tree"] = True
7623                 portage.writemsg("\n\n", noiselevel=-1)
7624                 self.display(display_order)
7625                 prefix = colorize("BAD", " * ")
7626                 portage.writemsg("\n", noiselevel=-1)
7627                 portage.writemsg(prefix + "Error: circular dependencies:\n",
7628                         noiselevel=-1)
7629                 portage.writemsg("\n", noiselevel=-1)
7630                 mygraph.debug_print()
7631                 portage.writemsg("\n", noiselevel=-1)
7632                 portage.writemsg(prefix + "Note that circular dependencies " + \
7633                         "can often be avoided by temporarily\n", noiselevel=-1)
7634                 portage.writemsg(prefix + "disabling USE flags that trigger " + \
7635                         "optional dependencies.\n", noiselevel=-1)
7636
7637         def _show_merge_list(self):
7638                 if self._serialized_tasks_cache is not None and \
7639                         not (self._displayed_list and \
7640                         (self._displayed_list == self._serialized_tasks_cache or \
7641                         self._displayed_list == \
7642                                 list(reversed(self._serialized_tasks_cache)))):
7643                         display_list = self._serialized_tasks_cache[:]
7644                         if "--tree" in self.myopts:
7645                                 display_list.reverse()
7646                         self.display(display_list)
7647
7648         def _show_unsatisfied_blockers(self, blockers):
7649                 self._show_merge_list()
7650                 msg = "Error: The above package list contains " + \
7651                         "packages which cannot be installed " + \
7652                         "at the same time on the same system."
7653                 prefix = colorize("BAD", " * ")
7654                 from textwrap import wrap
7655                 portage.writemsg("\n", noiselevel=-1)
7656                 for line in wrap(msg, 70):
7657                         portage.writemsg(prefix + line + "\n", noiselevel=-1)
7658
7659                 # Display the conflicting packages along with the packages
7660                 # that pulled them in. This is helpful for troubleshooting
7661                 # cases in which blockers don't solve automatically and
7662                 # the reasons are not apparent from the normal merge list
7663                 # display.
7664
7665                 conflict_pkgs = {}
7666                 for blocker in blockers:
7667                         for pkg in chain(self._blocked_pkgs.child_nodes(blocker), \
7668                                 self._blocker_parents.parent_nodes(blocker)):
7669                                 parent_atoms = self._parent_atoms.get(pkg)
7670                                 if not parent_atoms:
7671                                         atom = self._blocked_world_pkgs.get(pkg)
7672                                         if atom is not None:
7673                                                 parent_atoms = set([("@world", atom)])
7674                                 if parent_atoms:
7675                                         conflict_pkgs[pkg] = parent_atoms
7676
7677                 if conflict_pkgs:
7678                         # Reduce noise by pruning packages that are only
7679                         # pulled in by other conflict packages.
7680                         pruned_pkgs = set()
7681                         for pkg, parent_atoms in conflict_pkgs.iteritems():
7682                                 relevant_parent = False
7683                                 for parent, atom in parent_atoms:
7684                                         if parent not in conflict_pkgs:
7685                                                 relevant_parent = True
7686                                                 break
7687                                 if not relevant_parent:
7688                                         pruned_pkgs.add(pkg)
7689                         for pkg in pruned_pkgs:
7690                                 del conflict_pkgs[pkg]
7691
7692                 if conflict_pkgs:
7693                         msg = []
7694                         msg.append("\n")
7695                         indent = "  "
7696                         # Max number of parents shown, to avoid flooding the display.
7697                         max_parents = 3
7698                         for pkg, parent_atoms in conflict_pkgs.iteritems():
7699
7700                                 pruned_list = set()
7701
7702                                 # Prefer packages that are not directly involved in a conflict.
7703                                 for parent_atom in parent_atoms:
7704                                         if len(pruned_list) >= max_parents:
7705                                                 break
7706                                         parent, atom = parent_atom
7707                                         if parent not in conflict_pkgs:
7708                                                 pruned_list.add(parent_atom)
7709
7710                                 for parent_atom in parent_atoms:
7711                                         if len(pruned_list) >= max_parents:
7712                                                 break
7713                                         pruned_list.add(parent_atom)
7714
7715                                 omitted_parents = len(parent_atoms) - len(pruned_list)
7716                                 msg.append(indent + "%s pulled in by\n" % pkg)
7717
7718                                 for parent_atom in pruned_list:
7719                                         parent, atom = parent_atom
7720                                         msg.append(2*indent)
7721                                         if isinstance(parent,
7722                                                 (PackageArg, AtomArg)):
7723                                                 # For PackageArg and AtomArg types, it's
7724                                                 # redundant to display the atom attribute.
7725                                                 msg.append(str(parent))
7726                                         else:
7727                                                 # Display the specific atom from SetArg or
7728                                                 # Package types.
7729                                                 msg.append("%s required by %s" % (atom, parent))
7730                                         msg.append("\n")
7731
7732                                 if omitted_parents:
7733                                         msg.append(2*indent)
7734                                         msg.append("(and %d more)\n" % omitted_parents)
7735
7736                                 msg.append("\n")
7737
7738                         sys.stderr.write("".join(msg))
7739                         sys.stderr.flush()
7740
7741                 if "--quiet" not in self.myopts:
7742                         show_blocker_docs_link()
7743
7744         def display(self, mylist, favorites=[], verbosity=None):
7745
7746                 # This is used to prevent display_problems() from
7747                 # redundantly displaying this exact same merge list
7748                 # again via _show_merge_list().
7749                 self._displayed_list = mylist
7750
7751                 if verbosity is None:
7752                         verbosity = ("--quiet" in self.myopts and 1 or \
7753                                 "--verbose" in self.myopts and 3 or 2)
7754                 favorites_set = InternalPackageSet(favorites)
7755                 oneshot = "--oneshot" in self.myopts or \
7756                         "--onlydeps" in self.myopts
7757                 columns = "--columns" in self.myopts
7758                 changelogs=[]
7759                 p=[]
7760                 blockers = []
7761
7762                 counters = PackageCounters()
7763
7764                 if verbosity == 1 and "--verbose" not in self.myopts:
7765                         def create_use_string(*args):
7766                                 return ""
7767                 else:
7768                         def create_use_string(name, cur_iuse, iuse_forced, cur_use,
7769                                 old_iuse, old_use,
7770                                 is_new, reinst_flags,
7771                                 all_flags=(verbosity == 3 or "--quiet" in self.myopts),
7772                                 alphabetical=("--alphabetical" in self.myopts)):
7773                                 enabled = []
7774                                 if alphabetical:
7775                                         disabled = enabled
7776                                         removed = enabled
7777                                 else:
7778                                         disabled = []
7779                                         removed = []
7780                                 cur_iuse = set(cur_iuse)
7781                                 enabled_flags = cur_iuse.intersection(cur_use)
7782                                 removed_iuse = set(old_iuse).difference(cur_iuse)
7783                                 any_iuse = cur_iuse.union(old_iuse)
7784                                 any_iuse = list(any_iuse)
7785                                 any_iuse.sort()
7786                                 for flag in any_iuse:
7787                                         flag_str = None
7788                                         isEnabled = False
7789                                         reinst_flag = reinst_flags and flag in reinst_flags
7790                                         if flag in enabled_flags:
7791                                                 isEnabled = True
7792                                                 if is_new or flag in old_use and \
7793                                                         (all_flags or reinst_flag):
7794                                                         flag_str = red(flag)
7795                                                 elif flag not in old_iuse:
7796                                                         flag_str = yellow(flag) + "%*"
7797                                                 elif flag not in old_use:
7798                                                         flag_str = green(flag) + "*"
7799                                         elif flag in removed_iuse:
7800                                                 if all_flags or reinst_flag:
7801                                                         flag_str = yellow("-" + flag) + "%"
7802                                                         if flag in old_use:
7803                                                                 flag_str += "*"
7804                                                         flag_str = "(" + flag_str + ")"
7805                                                         removed.append(flag_str)
7806                                                 continue
7807                                         else:
7808                                                 if is_new or flag in old_iuse and \
7809                                                         flag not in old_use and \
7810                                                         (all_flags or reinst_flag):
7811                                                         flag_str = blue("-" + flag)
7812                                                 elif flag not in old_iuse:
7813                                                         flag_str = yellow("-" + flag)
7814                                                         if flag not in iuse_forced:
7815                                                                 flag_str += "%"
7816                                                 elif flag in old_use:
7817                                                         flag_str = green("-" + flag) + "*"
7818                                         if flag_str:
7819                                                 if flag in iuse_forced:
7820                                                         flag_str = "(" + flag_str + ")"
7821                                                 if isEnabled:
7822                                                         enabled.append(flag_str)
7823                                                 else:
7824                                                         disabled.append(flag_str)
7825
7826                                 if alphabetical:
7827                                         ret = " ".join(enabled)
7828                                 else:
7829                                         ret = " ".join(enabled + disabled + removed)
7830                                 if ret:
7831                                         ret = '%s="%s" ' % (name, ret)
7832                                 return ret
7833
7834                 repo_display = RepoDisplay(self.roots)
7835
7836                 tree_nodes = []
7837                 display_list = []
7838                 mygraph = self.digraph.copy()
7839
7840                 # If there are any Uninstall instances, add the corresponding
7841                 # blockers to the digraph (useful for --tree display).
7842
7843                 executed_uninstalls = set(node for node in mylist \
7844                         if isinstance(node, Package) and node.operation == "unmerge")
7845
7846                 for uninstall in self._blocker_uninstalls.leaf_nodes():
7847                         uninstall_parents = \
7848                                 self._blocker_uninstalls.parent_nodes(uninstall)
7849                         if not uninstall_parents:
7850                                 continue
7851
7852                         # Remove the corresponding "nomerge" node and substitute
7853                         # the Uninstall node.
7854                         inst_pkg = self._pkg_cache[
7855                                 ("installed", uninstall.root, uninstall.cpv, "nomerge")]
7856                         try:
7857                                 mygraph.remove(inst_pkg)
7858                         except KeyError:
7859                                 pass
7860
7861                         try:
7862                                 inst_pkg_blockers = self._blocker_parents.child_nodes(inst_pkg)
7863                         except KeyError:
7864                                 inst_pkg_blockers = []
7865
7866                         # Break the Package -> Uninstall edges.
7867                         mygraph.remove(uninstall)
7868
7869                         # Resolution of a package's blockers
7870                         # depend on it's own uninstallation.
7871                         for blocker in inst_pkg_blockers:
7872                                 mygraph.add(uninstall, blocker)
7873
7874                         # Expand Package -> Uninstall edges into
7875                         # Package -> Blocker -> Uninstall edges.
7876                         for blocker in uninstall_parents:
7877                                 mygraph.add(uninstall, blocker)
7878                                 for parent in self._blocker_parents.parent_nodes(blocker):
7879                                         if parent != inst_pkg:
7880                                                 mygraph.add(blocker, parent)
7881
7882                         # If the uninstall task did not need to be executed because
7883                         # of an upgrade, display Blocker -> Upgrade edges since the
7884                         # corresponding Blocker -> Uninstall edges will not be shown.
7885                         upgrade_node = \
7886                                 self._slot_pkg_map[uninstall.root].get(uninstall.slot_atom)
7887                         if upgrade_node is not None and \
7888                                 uninstall not in executed_uninstalls:
7889                                 for blocker in uninstall_parents:
7890                                         mygraph.add(upgrade_node, blocker)
7891
7892                 unsatisfied_blockers = []
7893                 i = 0
7894                 depth = 0
7895                 shown_edges = set()
7896                 for x in mylist:
7897                         if isinstance(x, Blocker) and not x.satisfied:
7898                                 unsatisfied_blockers.append(x)
7899                                 continue
7900                         graph_key = x
7901                         if "--tree" in self.myopts:
7902                                 depth = len(tree_nodes)
7903                                 while depth and graph_key not in \
7904                                         mygraph.child_nodes(tree_nodes[depth-1]):
7905                                                 depth -= 1
7906                                 if depth:
7907                                         tree_nodes = tree_nodes[:depth]
7908                                         tree_nodes.append(graph_key)
7909                                         display_list.append((x, depth, True))
7910                                         shown_edges.add((graph_key, tree_nodes[depth-1]))
7911                                 else:
7912                                         traversed_nodes = set() # prevent endless circles
7913                                         traversed_nodes.add(graph_key)
7914                                         def add_parents(current_node, ordered):
7915                                                 parent_nodes = None
7916                                                 # Do not traverse to parents if this node is an
7917                                                 # an argument or a direct member of a set that has
7918                                                 # been specified as an argument (system or world).
7919                                                 if current_node not in self._set_nodes:
7920                                                         parent_nodes = mygraph.parent_nodes(current_node)
7921                                                 if parent_nodes:
7922                                                         child_nodes = set(mygraph.child_nodes(current_node))
7923                                                         selected_parent = None
7924                                                         # First, try to avoid a direct cycle.
7925                                                         for node in parent_nodes:
7926                                                                 if not isinstance(node, (Blocker, Package)):
7927                                                                         continue
7928                                                                 if node not in traversed_nodes and \
7929                                                                         node not in child_nodes:
7930                                                                         edge = (current_node, node)
7931                                                                         if edge in shown_edges:
7932                                                                                 continue
7933                                                                         selected_parent = node
7934                                                                         break
7935                                                         if not selected_parent:
7936                                                                 # A direct cycle is unavoidable.
7937                                                                 for node in parent_nodes:
7938                                                                         if not isinstance(node, (Blocker, Package)):
7939                                                                                 continue
7940                                                                         if node not in traversed_nodes:
7941                                                                                 edge = (current_node, node)
7942                                                                                 if edge in shown_edges:
7943                                                                                         continue
7944                                                                                 selected_parent = node
7945                                                                                 break
7946                                                         if selected_parent:
7947                                                                 shown_edges.add((current_node, selected_parent))
7948                                                                 traversed_nodes.add(selected_parent)
7949                                                                 add_parents(selected_parent, False)
7950                                                 display_list.append((current_node,
7951                                                         len(tree_nodes), ordered))
7952                                                 tree_nodes.append(current_node)
7953                                         tree_nodes = []
7954                                         add_parents(graph_key, True)
7955                         else:
7956                                 display_list.append((x, depth, True))
7957                 mylist = display_list
7958                 for x in unsatisfied_blockers:
7959                         mylist.append((x, 0, True))
7960
7961                 last_merge_depth = 0
7962                 for i in xrange(len(mylist)-1,-1,-1):
7963                         graph_key, depth, ordered = mylist[i]
7964                         if not ordered and depth == 0 and i > 0 \
7965                                 and graph_key == mylist[i-1][0] and \
7966                                 mylist[i-1][1] == 0:
7967                                 # An ordered node got a consecutive duplicate when the tree was
7968                                 # being filled in.
7969                                 del mylist[i]
7970                                 continue
7971                         if ordered and graph_key[-1] != "nomerge":
7972                                 last_merge_depth = depth
7973                                 continue
7974                         if depth >= last_merge_depth or \
7975                                 i < len(mylist) - 1 and \
7976                                 depth >= mylist[i+1][1]:
7977                                         del mylist[i]
7978
7979                 from portage import flatten
7980                 from portage.dep import use_reduce, paren_reduce
7981                 # files to fetch list - avoids counting a same file twice
7982                 # in size display (verbose mode)
7983                 myfetchlist=[]
7984
7985                 # Use this set to detect when all the "repoadd" strings are "[0]"
7986                 # and disable the entire repo display in this case.
7987                 repoadd_set = set()
7988
7989                 for mylist_index in xrange(len(mylist)):
7990                         x, depth, ordered = mylist[mylist_index]
7991                         pkg_type = x[0]
7992                         myroot = x[1]
7993                         pkg_key = x[2]
7994                         portdb = self.trees[myroot]["porttree"].dbapi
7995                         bindb  = self.trees[myroot]["bintree"].dbapi
7996                         vardb = self.trees[myroot]["vartree"].dbapi
7997                         vartree = self.trees[myroot]["vartree"]
7998                         pkgsettings = self.pkgsettings[myroot]
7999
8000                         fetch=" "
8001                         indent = " " * depth
8002
8003                         if isinstance(x, Blocker):
8004                                 if x.satisfied:
8005                                         blocker_style = "PKG_BLOCKER_SATISFIED"
8006                                         addl = "%s  %s  " % (colorize(blocker_style, "b"), fetch)
8007                                 else:
8008                                         blocker_style = "PKG_BLOCKER"
8009                                         addl = "%s  %s  " % (colorize(blocker_style, "B"), fetch)
8010                                 if ordered:
8011                                         counters.blocks += 1
8012                                         if x.satisfied:
8013                                                 counters.blocks_satisfied += 1
8014                                 resolved = portage.key_expand(
8015                                         str(x.atom).lstrip("!"), mydb=vardb, settings=pkgsettings)
8016                                 if "--columns" in self.myopts and "--quiet" in self.myopts:
8017                                         addl += " " + colorize(blocker_style, resolved)
8018                                 else:
8019                                         addl = "[%s %s] %s%s" % \
8020                                                 (colorize(blocker_style, "blocks"),
8021                                                 addl, indent, colorize(blocker_style, resolved))
8022                                 block_parents = self._blocker_parents.parent_nodes(x)
8023                                 block_parents = set([pnode[2] for pnode in block_parents])
8024                                 block_parents = ", ".join(block_parents)
8025                                 if resolved!=x[2]:
8026                                         addl += colorize(blocker_style,
8027                                                 " (\"%s\" is blocking %s)") % \
8028                                                 (str(x.atom).lstrip("!"), block_parents)
8029                                 else:
8030                                         addl += colorize(blocker_style,
8031                                                 " (is blocking %s)") % block_parents
8032                                 if isinstance(x, Blocker) and x.satisfied:
8033                                         if columns:
8034                                                 continue
8035                                         p.append(addl)
8036                                 else:
8037                                         blockers.append(addl)
8038                         else:
8039                                 pkg_status = x[3]
8040                                 pkg_merge = ordered and pkg_status == "merge"
8041                                 if not pkg_merge and pkg_status == "merge":
8042                                         pkg_status = "nomerge"
8043                                 built = pkg_type != "ebuild"
8044                                 installed = pkg_type == "installed"
8045                                 pkg = x
8046                                 metadata = pkg.metadata
8047                                 ebuild_path = None
8048                                 repo_name = metadata["repository"]
8049                                 if pkg_type == "ebuild":
8050                                         ebuild_path = portdb.findname(pkg_key)
8051                                         if not ebuild_path: # shouldn't happen
8052                                                 raise portage.exception.PackageNotFound(pkg_key)
8053                                         repo_path_real = os.path.dirname(os.path.dirname(
8054                                                 os.path.dirname(ebuild_path)))
8055                                 else:
8056                                         repo_path_real = portdb.getRepositoryPath(repo_name)
8057                                 pkg_use = list(pkg.use.enabled)
8058                                 try:
8059                                         restrict = flatten(use_reduce(paren_reduce(
8060                                                 pkg.metadata["RESTRICT"]), uselist=pkg_use))
8061                                 except portage.exception.InvalidDependString, e:
8062                                         if not pkg.installed:
8063                                                 show_invalid_depstring_notice(x,
8064                                                         pkg.metadata["RESTRICT"], str(e))
8065                                                 del e
8066                                                 return 1
8067                                         restrict = []
8068                                 if "ebuild" == pkg_type and x[3] != "nomerge" and \
8069                                         "fetch" in restrict:
8070                                         fetch = red("F")
8071                                         if ordered:
8072                                                 counters.restrict_fetch += 1
8073                                         if portdb.fetch_check(pkg_key, pkg_use):
8074                                                 fetch = green("f")
8075                                                 if ordered:
8076                                                         counters.restrict_fetch_satisfied += 1
8077
8078                                 #we need to use "--emptrytree" testing here rather than "empty" param testing because "empty"
8079                                 #param is used for -u, where you still *do* want to see when something is being upgraded.
8080                                 myoldbest = []
8081                                 myinslotlist = None
8082                                 installed_versions = vardb.match(portage.cpv_getkey(pkg_key))
8083                                 if vardb.cpv_exists(pkg_key):
8084                                         addl="  "+yellow("R")+fetch+"  "
8085                                         if ordered:
8086                                                 if pkg_merge:
8087                                                         counters.reinst += 1
8088                                                 elif pkg_status == "uninstall":
8089                                                         counters.uninst += 1
8090                                 # filter out old-style virtual matches
8091                                 elif installed_versions and \
8092                                         portage.cpv_getkey(installed_versions[0]) == \
8093                                         portage.cpv_getkey(pkg_key):
8094                                         myinslotlist = vardb.match(pkg.slot_atom)
8095                                         # If this is the first install of a new-style virtual, we
8096                                         # need to filter out old-style virtual matches.
8097                                         if myinslotlist and \
8098                                                 portage.cpv_getkey(myinslotlist[0]) != \
8099                                                 portage.cpv_getkey(pkg_key):
8100                                                 myinslotlist = None
8101                                         if myinslotlist:
8102                                                 myoldbest = myinslotlist[:]
8103                                                 addl = "   " + fetch
8104                                                 if not portage.dep.cpvequal(pkg_key,
8105                                                         portage.best([pkg_key] + myoldbest)):
8106                                                         # Downgrade in slot
8107                                                         addl += turquoise("U")+blue("D")
8108                                                         if ordered:
8109                                                                 counters.downgrades += 1
8110                                                 else:
8111                                                         # Update in slot
8112                                                         addl += turquoise("U") + " "
8113                                                         if ordered:
8114                                                                 counters.upgrades += 1
8115                                         else:
8116                                                 # New slot, mark it new.
8117                                                 addl = " " + green("NS") + fetch + "  "
8118                                                 myoldbest = vardb.match(portage.cpv_getkey(pkg_key))
8119                                                 if ordered:
8120                                                         counters.newslot += 1
8121
8122                                         if "--changelog" in self.myopts:
8123                                                 inst_matches = vardb.match(pkg.slot_atom)
8124                                                 if inst_matches:
8125                                                         changelogs.extend(self.calc_changelog(
8126                                                                 portdb.findname(pkg_key),
8127                                                                 inst_matches[0], pkg_key))
8128                                 else:
8129                                         addl = " " + green("N") + " " + fetch + "  "
8130                                         if ordered:
8131                                                 counters.new += 1
8132
8133                                 verboseadd = ""
8134                                 repoadd = None
8135
8136                                 if True:
8137                                         # USE flag display
8138                                         forced_flags = set()
8139                                         pkgsettings.setcpv(pkg) # for package.use.{mask,force}
8140                                         forced_flags.update(pkgsettings.useforce)
8141                                         forced_flags.update(pkgsettings.usemask)
8142
8143                                         cur_use = [flag for flag in pkg.use.enabled \
8144                                                 if flag in pkg.iuse.all]
8145                                         cur_iuse = sorted(pkg.iuse.all)
8146
8147                                         if myoldbest and myinslotlist:
8148                                                 previous_cpv = myoldbest[0]
8149                                         else:
8150                                                 previous_cpv = pkg.cpv
8151                                         if vardb.cpv_exists(previous_cpv):
8152                                                 old_iuse, old_use = vardb.aux_get(
8153                                                                 previous_cpv, ["IUSE", "USE"])
8154                                                 old_iuse = list(set(
8155                                                         filter_iuse_defaults(old_iuse.split())))
8156                                                 old_iuse.sort()
8157                                                 old_use = old_use.split()
8158                                                 is_new = False
8159                                         else:
8160                                                 old_iuse = []
8161                                                 old_use = []
8162                                                 is_new = True
8163
8164                                         old_use = [flag for flag in old_use if flag in old_iuse]
8165
8166                                         use_expand = pkgsettings["USE_EXPAND"].lower().split()
8167                                         use_expand.sort()
8168                                         use_expand.reverse()
8169                                         use_expand_hidden = \
8170                                                 pkgsettings["USE_EXPAND_HIDDEN"].lower().split()
8171
8172                                         def map_to_use_expand(myvals, forcedFlags=False,
8173                                                 removeHidden=True):
8174                                                 ret = {}
8175                                                 forced = {}
8176                                                 for exp in use_expand:
8177                                                         ret[exp] = []
8178                                                         forced[exp] = set()
8179                                                         for val in myvals[:]:
8180                                                                 if val.startswith(exp.lower()+"_"):
8181                                                                         if val in forced_flags:
8182                                                                                 forced[exp].add(val[len(exp)+1:])
8183                                                                         ret[exp].append(val[len(exp)+1:])
8184                                                                         myvals.remove(val)
8185                                                 ret["USE"] = myvals
8186                                                 forced["USE"] = [val for val in myvals \
8187                                                         if val in forced_flags]
8188                                                 if removeHidden:
8189                                                         for exp in use_expand_hidden:
8190                                                                 ret.pop(exp, None)
8191                                                 if forcedFlags:
8192                                                         return ret, forced
8193                                                 return ret
8194
8195                                         # Prevent USE_EXPAND_HIDDEN flags from being hidden if they
8196                                         # are the only thing that triggered reinstallation.
8197                                         reinst_flags_map = {}
8198                                         reinstall_for_flags = self._reinstall_nodes.get(pkg)
8199                                         reinst_expand_map = None
8200                                         if reinstall_for_flags:
8201                                                 reinst_flags_map = map_to_use_expand(
8202                                                         list(reinstall_for_flags), removeHidden=False)
8203                                                 for k in list(reinst_flags_map):
8204                                                         if not reinst_flags_map[k]:
8205                                                                 del reinst_flags_map[k]
8206                                                 if not reinst_flags_map.get("USE"):
8207                                                         reinst_expand_map = reinst_flags_map.copy()
8208                                                         reinst_expand_map.pop("USE", None)
8209                                         if reinst_expand_map and \
8210                                                 not set(reinst_expand_map).difference(
8211                                                 use_expand_hidden):
8212                                                 use_expand_hidden = \
8213                                                         set(use_expand_hidden).difference(
8214                                                         reinst_expand_map)
8215
8216                                         cur_iuse_map, iuse_forced = \
8217                                                 map_to_use_expand(cur_iuse, forcedFlags=True)
8218                                         cur_use_map = map_to_use_expand(cur_use)
8219                                         old_iuse_map = map_to_use_expand(old_iuse)
8220                                         old_use_map = map_to_use_expand(old_use)
8221
8222                                         use_expand.sort()
8223                                         use_expand.insert(0, "USE")
8224                                         
8225                                         for key in use_expand:
8226                                                 if key in use_expand_hidden:
8227                                                         continue
8228                                                 verboseadd += create_use_string(key.upper(),
8229                                                         cur_iuse_map[key], iuse_forced[key],
8230                                                         cur_use_map[key], old_iuse_map[key],
8231                                                         old_use_map[key], is_new,
8232                                                         reinst_flags_map.get(key))
8233
8234                                 if verbosity == 3:
8235                                         # size verbose
8236                                         mysize=0
8237                                         if pkg_type == "ebuild" and pkg_merge:
8238                                                 try:
8239                                                         myfilesdict = portdb.getfetchsizes(pkg_key,
8240                                                                 useflags=pkg_use, debug=self.edebug)
8241                                                 except portage.exception.InvalidDependString, e:
8242                                                         src_uri = portdb.aux_get(pkg_key, ["SRC_URI"])[0]
8243                                                         show_invalid_depstring_notice(x, src_uri, str(e))
8244                                                         del e
8245                                                         return 1
8246                                                 if myfilesdict is None:
8247                                                         myfilesdict="[empty/missing/bad digest]"
8248                                                 else:
8249                                                         for myfetchfile in myfilesdict:
8250                                                                 if myfetchfile not in myfetchlist:
8251                                                                         mysize+=myfilesdict[myfetchfile]
8252                                                                         myfetchlist.append(myfetchfile)
8253                                                         if ordered:
8254                                                                 counters.totalsize += mysize
8255                                                 verboseadd += format_size(mysize)
8256
8257                                         # overlay verbose
8258                                         # assign index for a previous version in the same slot
8259                                         has_previous = False
8260                                         repo_name_prev = None
8261                                         slot_atom = "%s:%s" % (portage.dep_getkey(pkg_key),
8262                                                 metadata["SLOT"])
8263                                         slot_matches = vardb.match(slot_atom)
8264                                         if slot_matches:
8265                                                 has_previous = True
8266                                                 repo_name_prev = vardb.aux_get(slot_matches[0],
8267                                                         ["repository"])[0]
8268
8269                                         # now use the data to generate output
8270                                         if pkg.installed or not has_previous:
8271                                                 repoadd = repo_display.repoStr(repo_path_real)
8272                                         else:
8273                                                 repo_path_prev = None
8274                                                 if repo_name_prev:
8275                                                         repo_path_prev = portdb.getRepositoryPath(
8276                                                                 repo_name_prev)
8277                                                 if repo_path_prev == repo_path_real:
8278                                                         repoadd = repo_display.repoStr(repo_path_real)
8279                                                 else:
8280                                                         repoadd = "%s=>%s" % (
8281                                                                 repo_display.repoStr(repo_path_prev),
8282                                                                 repo_display.repoStr(repo_path_real))
8283                                         if repoadd:
8284                                                 repoadd_set.add(repoadd)
8285
8286                                 xs = [portage.cpv_getkey(pkg_key)] + \
8287                                         list(portage.catpkgsplit(pkg_key)[2:])
8288                                 if xs[2] == "r0":
8289                                         xs[2] = ""
8290                                 else:
8291                                         xs[2] = "-" + xs[2]
8292
8293                                 mywidth = 130
8294                                 if "COLUMNWIDTH" in self.settings:
8295                                         try:
8296                                                 mywidth = int(self.settings["COLUMNWIDTH"])
8297                                         except ValueError, e:
8298                                                 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
8299                                                 portage.writemsg(
8300                                                         "!!! Unable to parse COLUMNWIDTH='%s'\n" % \
8301                                                         self.settings["COLUMNWIDTH"], noiselevel=-1)
8302                                                 del e
8303                                 oldlp = mywidth - 30
8304                                 newlp = oldlp - 30
8305
8306                                 # Convert myoldbest from a list to a string.
8307                                 if not myoldbest:
8308                                         myoldbest = ""
8309                                 else:
8310                                         for pos, key in enumerate(myoldbest):
8311                                                 key = portage.catpkgsplit(key)[2] + \
8312                                                         "-" + portage.catpkgsplit(key)[3]
8313                                                 if key[-3:] == "-r0":
8314                                                         key = key[:-3]
8315                                                 myoldbest[pos] = key
8316                                         myoldbest = blue("["+", ".join(myoldbest)+"]")
8317
8318                                 pkg_cp = xs[0]
8319                                 root_config = self.roots[myroot]
8320                                 system_set = root_config.sets["system"]
8321                                 world_set  = root_config.sets["world"]
8322
8323                                 pkg_system = False
8324                                 pkg_world = False
8325                                 try:
8326                                         pkg_system = system_set.findAtomForPackage(pkg)
8327                                         pkg_world  = world_set.findAtomForPackage(pkg)
8328                                         if not (oneshot or pkg_world) and \
8329                                                 myroot == self.target_root and \
8330                                                 favorites_set.findAtomForPackage(pkg):
8331                                                 # Maybe it will be added to world now.
8332                                                 if create_world_atom(pkg, favorites_set, root_config):
8333                                                         pkg_world = True
8334                                 except portage.exception.InvalidDependString:
8335                                         # This is reported elsewhere if relevant.
8336                                         pass
8337
8338                                 def pkgprint(pkg_str):
8339                                         if pkg_merge:
8340                                                 if pkg_system:
8341                                                         return colorize("PKG_MERGE_SYSTEM", pkg_str)
8342                                                 elif pkg_world:
8343                                                         return colorize("PKG_MERGE_WORLD", pkg_str)
8344                                                 else:
8345                                                         return colorize("PKG_MERGE", pkg_str)
8346                                         elif pkg_status == "uninstall":
8347                                                 return colorize("PKG_UNINSTALL", pkg_str)
8348                                         else:
8349                                                 if pkg_system:
8350                                                         return colorize("PKG_NOMERGE_SYSTEM", pkg_str)
8351                                                 elif pkg_world:
8352                                                         return colorize("PKG_NOMERGE_WORLD", pkg_str)
8353                                                 else:
8354                                                         return colorize("PKG_NOMERGE", pkg_str)
8355
8356                                 try:
8357                                         properties = flatten(use_reduce(paren_reduce(
8358                                                 pkg.metadata["PROPERTIES"]), uselist=pkg.use.enabled))
8359                                 except portage.exception.InvalidDependString, e:
8360                                         if not pkg.installed:
8361                                                 show_invalid_depstring_notice(pkg,
8362                                                         pkg.metadata["PROPERTIES"], str(e))
8363                                                 del e
8364                                                 return 1
8365                                         properties = []
8366                                 interactive = "interactive" in properties
8367                                 if interactive and pkg.operation == "merge":
8368                                         addl = colorize("WARN", "I") + addl[1:]
8369                                         if ordered:
8370                                                 counters.interactive += 1
8371
8372                                 if x[1]!="/":
8373                                         if myoldbest:
8374                                                 myoldbest +=" "
8375                                         if "--columns" in self.myopts:
8376                                                 if "--quiet" in self.myopts:
8377                                                         myprint=addl+" "+indent+pkgprint(pkg_cp)
8378                                                         myprint=myprint+darkblue(" "+xs[1]+xs[2])+" "
8379                                                         myprint=myprint+myoldbest
8380                                                         myprint=myprint+darkgreen("to "+x[1])
8381                                                         verboseadd = None
8382                                                 else:
8383                                                         if not pkg_merge:
8384                                                                 myprint = "[%s] %s%s" % \
8385                                                                         (pkgprint(pkg_status.ljust(13)),
8386                                                                         indent, pkgprint(pkg.cp))
8387                                                         else:
8388                                                                 myprint = "[%s %s] %s%s" % \
8389                                                                         (pkgprint(pkg.type_name), addl,
8390                                                                         indent, pkgprint(pkg.cp))
8391                                                         if (newlp-nc_len(myprint)) > 0:
8392                                                                 myprint=myprint+(" "*(newlp-nc_len(myprint)))
8393                                                         myprint=myprint+"["+darkblue(xs[1]+xs[2])+"] "
8394                                                         if (oldlp-nc_len(myprint)) > 0:
8395                                                                 myprint=myprint+" "*(oldlp-nc_len(myprint))
8396                                                         myprint=myprint+myoldbest
8397                                                         myprint += darkgreen("to " + pkg.root)
8398                                         else:
8399                                                 if not pkg_merge:
8400                                                         myprint = "[%s] " % pkgprint(pkg_status.ljust(13))
8401                                                 else:
8402                                                         myprint = "[%s %s] " % (pkgprint(pkg_type), addl)
8403                                                 myprint += indent + pkgprint(pkg_key) + " " + \
8404                                                         myoldbest + darkgreen("to " + myroot)
8405                                 else:
8406                                         if "--columns" in self.myopts:
8407                                                 if "--quiet" in self.myopts:
8408                                                         myprint=addl+" "+indent+pkgprint(pkg_cp)
8409                                                         myprint=myprint+" "+green(xs[1]+xs[2])+" "
8410                                                         myprint=myprint+myoldbest
8411                                                         verboseadd = None
8412                                                 else:
8413                                                         if not pkg_merge:
8414                                                                 myprint = "[%s] %s%s" % \
8415                                                                         (pkgprint(pkg_status.ljust(13)),
8416                                                                         indent, pkgprint(pkg.cp))
8417                                                         else:
8418                                                                 myprint = "[%s %s] %s%s" % \
8419                                                                         (pkgprint(pkg.type_name), addl,
8420                                                                         indent, pkgprint(pkg.cp))
8421                                                         if (newlp-nc_len(myprint)) > 0:
8422                                                                 myprint=myprint+(" "*(newlp-nc_len(myprint)))
8423                                                         myprint=myprint+green(" ["+xs[1]+xs[2]+"] ")
8424                                                         if (oldlp-nc_len(myprint)) > 0:
8425                                                                 myprint=myprint+(" "*(oldlp-nc_len(myprint)))
8426                                                         myprint += myoldbest
8427                                         else:
8428                                                 if not pkg_merge:
8429                                                         myprint = "[%s] %s%s %s" % \
8430                                                                 (pkgprint(pkg_status.ljust(13)),
8431                                                                 indent, pkgprint(pkg.cpv),
8432                                                                 myoldbest)
8433                                                 else:
8434                                                         myprint = "[%s %s] %s%s %s" % \
8435                                                                 (pkgprint(pkg_type), addl, indent,
8436                                                                 pkgprint(pkg.cpv), myoldbest)
8437
8438                                 if columns and pkg.operation == "uninstall":
8439                                         continue
8440                                 p.append((myprint, verboseadd, repoadd))
8441
8442                                 if "--tree" not in self.myopts and \
8443                                         "--quiet" not in self.myopts and \
8444                                         not self._opts_no_restart.intersection(self.myopts) and \
8445                                         pkg.root == self._running_root.root and \
8446                                         portage.match_from_list(
8447                                         portage.const.PORTAGE_PACKAGE_ATOM, [pkg]) and \
8448                                         not vardb.cpv_exists(pkg.cpv) and \
8449                                         "--quiet" not in self.myopts:
8450                                                 if mylist_index < len(mylist) - 1:
8451                                                         p.append(colorize("WARN", "*** Portage will stop merging at this point and reload itself,"))
8452                                                         p.append(colorize("WARN", "    then resume the merge."))
8453
8454                 out = sys.stdout
8455                 show_repos = repoadd_set and repoadd_set != set(["0"])
8456
8457                 for x in p:
8458                         if isinstance(x, basestring):
8459                                 out.write("%s\n" % (x,))
8460                                 continue
8461
8462                         myprint, verboseadd, repoadd = x
8463
8464                         if verboseadd:
8465                                 myprint += " " + verboseadd
8466
8467                         if show_repos and repoadd:
8468                                 myprint += " " + teal("[%s]" % repoadd)
8469
8470                         out.write("%s\n" % (myprint,))
8471
8472                 for x in blockers:
8473                         print x
8474
8475                 if verbosity == 3:
8476                         print
8477                         print counters
8478                         if show_repos:
8479                                 sys.stdout.write(str(repo_display))
8480
8481                 if "--changelog" in self.myopts:
8482                         print
8483                         for revision,text in changelogs:
8484                                 print bold('*'+revision)
8485                                 sys.stdout.write(text)
8486
8487                 sys.stdout.flush()
8488                 return os.EX_OK
8489
8490         def display_problems(self):
8491                 """
8492                 Display problems with the dependency graph such as slot collisions.
8493                 This is called internally by display() to show the problems _after_
8494                 the merge list where it is most likely to be seen, but if display()
8495                 is not going to be called then this method should be called explicitly
8496                 to ensure that the user is notified of problems with the graph.
8497
8498                 All output goes to stderr, except for unsatisfied dependencies which
8499                 go to stdout for parsing by programs such as autounmask.
8500                 """
8501
8502                 # Note that show_masked_packages() sends it's output to
8503                 # stdout, and some programs such as autounmask parse the
8504                 # output in cases when emerge bails out. However, when
8505                 # show_masked_packages() is called for installed packages
8506                 # here, the message is a warning that is more appropriate
8507                 # to send to stderr, so temporarily redirect stdout to
8508                 # stderr. TODO: Fix output code so there's a cleaner way
8509                 # to redirect everything to stderr.
8510                 sys.stdout.flush()
8511                 sys.stderr.flush()
8512                 stdout = sys.stdout
8513                 try:
8514                         sys.stdout = sys.stderr
8515                         self._display_problems()
8516                 finally:
8517                         sys.stdout = stdout
8518                         sys.stdout.flush()
8519                         sys.stderr.flush()
8520
8521                 # This goes to stdout for parsing by programs like autounmask.
8522                 for pargs, kwargs in self._unsatisfied_deps_for_display:
8523                         self._show_unsatisfied_dep(*pargs, **kwargs)
8524
8525         def _display_problems(self):
8526                 if self._circular_deps_for_display is not None:
8527                         self._show_circular_deps(
8528                                 self._circular_deps_for_display)
8529
8530                 # The user is only notified of a slot conflict if
8531                 # there are no unresolvable blocker conflicts.
8532                 if self._unsatisfied_blockers_for_display is not None:
8533                         self._show_unsatisfied_blockers(
8534                                 self._unsatisfied_blockers_for_display)
8535                 else:
8536                         self._show_slot_collision_notice()
8537
8538                 # TODO: Add generic support for "set problem" handlers so that
8539                 # the below warnings aren't special cases for world only.
8540
8541                 if self._missing_args:
8542                         world_problems = False
8543                         if "world" in self._sets:
8544                                 # Filter out indirect members of world (from nested sets)
8545                                 # since only direct members of world are desired here.
8546                                 world_set = self.roots[self.target_root].sets["world"]
8547                                 for arg, atom in self._missing_args:
8548                                         if arg.name == "world" and atom in world_set:
8549                                                 world_problems = True
8550                                                 break
8551
8552                         if world_problems:
8553                                 sys.stderr.write("\n!!! Problems have been " + \
8554                                         "detected with your world file\n")
8555                                 sys.stderr.write("!!! Please run " + \
8556                                         green("emaint --check world")+"\n\n")
8557
8558                 if self._missing_args:
8559                         sys.stderr.write("\n" + colorize("BAD", "!!!") + \
8560                                 " Ebuilds for the following packages are either all\n")
8561                         sys.stderr.write(colorize("BAD", "!!!") + \
8562                                 " masked or don't exist:\n")
8563                         sys.stderr.write(" ".join(str(atom) for arg, atom in \
8564                                 self._missing_args) + "\n")
8565
8566                 if self._pprovided_args:
8567                         arg_refs = {}
8568                         for arg, atom in self._pprovided_args:
8569                                 if isinstance(arg, SetArg):
8570                                         parent = arg.name
8571                                         arg_atom = (atom, atom)
8572                                 else:
8573                                         parent = "args"
8574                                         arg_atom = (arg.arg, atom)
8575                                 refs = arg_refs.setdefault(arg_atom, [])
8576                                 if parent not in refs:
8577                                         refs.append(parent)
8578                         msg = []
8579                         msg.append(bad("\nWARNING: "))
8580                         if len(self._pprovided_args) > 1:
8581                                 msg.append("Requested packages will not be " + \
8582                                         "merged because they are listed in\n")
8583                         else:
8584                                 msg.append("A requested package will not be " + \
8585                                         "merged because it is listed in\n")
8586                         msg.append("package.provided:\n\n")
8587                         problems_sets = set()
8588                         for (arg, atom), refs in arg_refs.iteritems():
8589                                 ref_string = ""
8590                                 if refs:
8591                                         problems_sets.update(refs)
8592                                         refs.sort()
8593                                         ref_string = ", ".join(["'%s'" % name for name in refs])
8594                                         ref_string = " pulled in by " + ref_string
8595                                 msg.append("  %s%s\n" % (colorize("INFORM", str(arg)), ref_string))
8596                         msg.append("\n")
8597                         if "world" in problems_sets:
8598                                 msg.append("This problem can be solved in one of the following ways:\n\n")
8599                                 msg.append("  A) Use emaint to clean offending packages from world (if not installed).\n")
8600                                 msg.append("  B) Uninstall offending packages (cleans them from world).\n")
8601                                 msg.append("  C) Remove offending entries from package.provided.\n\n")
8602                                 msg.append("The best course of action depends on the reason that an offending\n")
8603                                 msg.append("package.provided entry exists.\n\n")
8604                         sys.stderr.write("".join(msg))
8605
8606                 masked_packages = []
8607                 for pkg in self._masked_installed:
8608                         root_config = pkg.root_config
8609                         pkgsettings = self.pkgsettings[pkg.root]
8610                         mreasons = get_masking_status(pkg, pkgsettings, root_config)
8611                         masked_packages.append((root_config, pkgsettings,
8612                                 pkg.cpv, pkg.metadata, mreasons))
8613                 if masked_packages:
8614                         sys.stderr.write("\n" + colorize("BAD", "!!!") + \
8615                                 " The following installed packages are masked:\n")
8616                         show_masked_packages(masked_packages)
8617                         show_mask_docs()
8618                         print
8619
8620         def calc_changelog(self,ebuildpath,current,next):
8621                 if ebuildpath == None or not os.path.exists(ebuildpath):
8622                         return []
8623                 current = '-'.join(portage.catpkgsplit(current)[1:])
8624                 if current.endswith('-r0'):
8625                         current = current[:-3]
8626                 next = '-'.join(portage.catpkgsplit(next)[1:])
8627                 if next.endswith('-r0'):
8628                         next = next[:-3]
8629                 changelogpath = os.path.join(os.path.split(ebuildpath)[0],'ChangeLog')
8630                 try:
8631                         changelog = open(changelogpath).read()
8632                 except SystemExit, e:
8633                         raise # Needed else can't exit
8634                 except:
8635                         return []
8636                 divisions = self.find_changelog_tags(changelog)
8637                 #print 'XX from',current,'to',next
8638                 #for div,text in divisions: print 'XX',div
8639                 # skip entries for all revisions above the one we are about to emerge
8640                 for i in range(len(divisions)):
8641                         if divisions[i][0]==next:
8642                                 divisions = divisions[i:]
8643                                 break
8644                 # find out how many entries we are going to display
8645                 for i in range(len(divisions)):
8646                         if divisions[i][0]==current:
8647                                 divisions = divisions[:i]
8648                                 break
8649                 else:
8650                     # couldnt find the current revision in the list. display nothing
8651                         return []
8652                 return divisions
8653
8654         def find_changelog_tags(self,changelog):
8655                 divs = []
8656                 release = None
8657                 while 1:
8658                         match = re.search(r'^\*\ ?([-a-zA-Z0-9_.+]*)(?:\ .*)?\n',changelog,re.M)
8659                         if match is None:
8660                                 if release is not None:
8661                                         divs.append((release,changelog))
8662                                 return divs
8663                         if release is not None:
8664                                 divs.append((release,changelog[:match.start()]))
8665                         changelog = changelog[match.end():]
8666                         release = match.group(1)
8667                         if release.endswith('.ebuild'):
8668                                 release = release[:-7]
8669                         if release.endswith('-r0'):
8670                                 release = release[:-3]
8671
8672         def saveNomergeFavorites(self):
8673                 """Find atoms in favorites that are not in the mergelist and add them
8674                 to the world file if necessary."""
8675                 for x in ("--buildpkgonly", "--fetchonly", "--fetch-all-uri",
8676                         "--oneshot", "--onlydeps", "--pretend"):
8677                         if x in self.myopts:
8678                                 return
8679                 root_config = self.roots[self.target_root]
8680                 world_set = root_config.sets["world"]
8681
8682                 world_locked = False
8683                 if hasattr(world_set, "lock"):
8684                         world_set.lock()
8685                         world_locked = True
8686
8687                 if hasattr(world_set, "load"):
8688                         world_set.load() # maybe it's changed on disk
8689
8690                 args_set = self._sets["args"]
8691                 portdb = self.trees[self.target_root]["porttree"].dbapi
8692                 added_favorites = set()
8693                 for x in self._set_nodes:
8694                         pkg_type, root, pkg_key, pkg_status = x
8695                         if pkg_status != "nomerge":
8696                                 continue
8697
8698                         try:
8699                                 myfavkey = create_world_atom(x, args_set, root_config)
8700                                 if myfavkey:
8701                                         if myfavkey in added_favorites:
8702                                                 continue
8703                                         added_favorites.add(myfavkey)
8704                         except portage.exception.InvalidDependString, e:
8705                                 writemsg("\n\n!!! '%s' has invalid PROVIDE: %s\n" % \
8706                                         (pkg_key, str(e)), noiselevel=-1)
8707                                 writemsg("!!! see '%s'\n\n" % os.path.join(
8708                                         root, portage.VDB_PATH, pkg_key, "PROVIDE"), noiselevel=-1)
8709                                 del e
8710                 all_added = []
8711                 for k in self._sets:
8712                         if k in ("args", "world") or not root_config.sets[k].world_candidate:
8713                                 continue
8714                         s = SETPREFIX + k
8715                         if s in world_set:
8716                                 continue
8717                         all_added.append(SETPREFIX + k)
8718                 all_added.extend(added_favorites)
8719                 all_added.sort()
8720                 for a in all_added:
8721                         print ">>> Recording %s in \"world\" favorites file..." % \
8722                                 colorize("INFORM", str(a))
8723                 if all_added:
8724                         world_set.update(all_added)
8725
8726                 if world_locked:
8727                         world_set.unlock()
8728
8729         def loadResumeCommand(self, resume_data, skip_masked=False):
8730                 """
8731                 Add a resume command to the graph and validate it in the process.  This
8732                 will raise a PackageNotFound exception if a package is not available.
8733                 """
8734
8735                 if not isinstance(resume_data, dict):
8736                         return False
8737
8738                 mergelist = resume_data.get("mergelist")
8739                 if not isinstance(mergelist, list):
8740                         mergelist = []
8741
8742                 fakedb = self.mydbapi
8743                 trees = self.trees
8744                 serialized_tasks = []
8745                 masked_tasks = []
8746                 for x in mergelist:
8747                         if not (isinstance(x, list) and len(x) == 4):
8748                                 continue
8749                         pkg_type, myroot, pkg_key, action = x
8750                         if pkg_type not in self.pkg_tree_map:
8751                                 continue
8752                         if action != "merge":
8753                                 continue
8754                         tree_type = self.pkg_tree_map[pkg_type]
8755                         mydb = trees[myroot][tree_type].dbapi
8756                         db_keys = list(self._trees_orig[myroot][
8757                                 tree_type].dbapi._aux_cache_keys)
8758                         try:
8759                                 metadata = izip(db_keys, mydb.aux_get(pkg_key, db_keys))
8760                         except KeyError:
8761                                 # It does no exist or it is corrupt.
8762                                 if action == "uninstall":
8763                                         continue
8764                                 raise portage.exception.PackageNotFound(pkg_key)
8765                         installed = action == "uninstall"
8766                         built = pkg_type != "ebuild"
8767                         root_config = self.roots[myroot]
8768                         pkg = Package(built=built, cpv=pkg_key,
8769                                 installed=installed, metadata=metadata,
8770                                 operation=action, root_config=root_config,
8771                                 type_name=pkg_type)
8772                         if pkg_type == "ebuild":
8773                                 pkgsettings = self.pkgsettings[myroot]
8774                                 pkgsettings.setcpv(pkg)
8775                                 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
8776                                 pkg.metadata['CHOST'] = pkgsettings.get('CHOST', '')
8777                         self._pkg_cache[pkg] = pkg
8778
8779                         root_config = self.roots[pkg.root]
8780                         if "merge" == pkg.operation and \
8781                                 not visible(root_config.settings, pkg):
8782                                 if skip_masked:
8783                                         masked_tasks.append(Dependency(root=pkg.root, parent=pkg))
8784                                 else:
8785                                         self._unsatisfied_deps_for_display.append(
8786                                                 ((pkg.root, "="+pkg.cpv), {"myparent":None}))
8787
8788                         fakedb[myroot].cpv_inject(pkg)
8789                         serialized_tasks.append(pkg)
8790                         self.spinner.update()
8791
8792                 if self._unsatisfied_deps_for_display:
8793                         return False
8794
8795                 if not serialized_tasks or "--nodeps" in self.myopts:
8796                         self._serialized_tasks_cache = serialized_tasks
8797                         self._scheduler_graph = self.digraph
8798                 else:
8799                         self._select_package = self._select_pkg_from_graph
8800                         self.myparams.add("selective")
8801                         # Always traverse deep dependencies in order to account for
8802                         # potentially unsatisfied dependencies of installed packages.
8803                         # This is necessary for correct --keep-going or --resume operation
8804                         # in case a package from a group of circularly dependent packages
8805                         # fails. In this case, a package which has recently been installed
8806                         # may have an unsatisfied circular dependency (pulled in by
8807                         # PDEPEND, for example). So, even though a package is already
8808                         # installed, it may not have all of it's dependencies satisfied, so
8809                         # it may not be usable. If such a package is in the subgraph of
8810                         # deep depenedencies of a scheduled build, that build needs to
8811                         # be cancelled. In order for this type of situation to be
8812                         # recognized, deep traversal of dependencies is required.
8813                         self.myparams.add("deep")
8814
8815                         favorites = resume_data.get("favorites")
8816                         args_set = self._sets["args"]
8817                         if isinstance(favorites, list):
8818                                 args = self._load_favorites(favorites)
8819                         else:
8820                                 args = []
8821
8822                         for task in serialized_tasks:
8823                                 if isinstance(task, Package) and \
8824                                         task.operation == "merge":
8825                                         if not self._add_pkg(task, None):
8826                                                 return False
8827
8828                         # Packages for argument atoms need to be explicitly
8829                         # added via _add_pkg() so that they are included in the
8830                         # digraph (needed at least for --tree display).
8831                         for arg in args:
8832                                 for atom in arg.set:
8833                                         pkg, existing_node = self._select_package(
8834                                                 arg.root_config.root, atom)
8835                                         if existing_node is None and \
8836                                                 pkg is not None:
8837                                                 if not self._add_pkg(pkg, Dependency(atom=atom,
8838                                                         root=pkg.root, parent=arg)):
8839                                                         return False
8840
8841                         # Allow unsatisfied deps here to avoid showing a masking
8842                         # message for an unsatisfied dep that isn't necessarily
8843                         # masked.
8844                         if not self._create_graph(allow_unsatisfied=True):
8845                                 return False
8846
8847                         unsatisfied_deps = []
8848                         for dep in self._unsatisfied_deps:
8849                                 if not isinstance(dep.parent, Package):
8850                                         continue
8851                                 if dep.parent.operation == "merge":
8852                                         unsatisfied_deps.append(dep)
8853                                         continue
8854
8855                                 # For unsatisfied deps of installed packages, only account for
8856                                 # them if they are in the subgraph of dependencies of a package
8857                                 # which is scheduled to be installed.
8858                                 unsatisfied_install = False
8859                                 traversed = set()
8860                                 dep_stack = self.digraph.parent_nodes(dep.parent)
8861                                 while dep_stack:
8862                                         node = dep_stack.pop()
8863                                         if not isinstance(node, Package):
8864                                                 continue
8865                                         if node.operation == "merge":
8866                                                 unsatisfied_install = True
8867                                                 break
8868                                         if node in traversed:
8869                                                 continue
8870                                         traversed.add(node)
8871                                         dep_stack.extend(self.digraph.parent_nodes(node))
8872
8873                                 if unsatisfied_install:
8874                                         unsatisfied_deps.append(dep)
8875
8876                         if masked_tasks or unsatisfied_deps:
8877                                 # This probably means that a required package
8878                                 # was dropped via --skipfirst. It makes the
8879                                 # resume list invalid, so convert it to a
8880                                 # UnsatisfiedResumeDep exception.
8881                                 raise self.UnsatisfiedResumeDep(self,
8882                                         masked_tasks + unsatisfied_deps)
8883                         self._serialized_tasks_cache = None
8884                         try:
8885                                 self.altlist()
8886                         except self._unknown_internal_error:
8887                                 return False
8888
8889                 return True
8890
8891         def _load_favorites(self, favorites):
8892                 """
8893                 Use a list of favorites to resume state from a
8894                 previous select_files() call. This creates similar
8895                 DependencyArg instances to those that would have
8896                 been created by the original select_files() call.
8897                 This allows Package instances to be matched with
8898                 DependencyArg instances during graph creation.
8899                 """
8900                 root_config = self.roots[self.target_root]
8901                 getSetAtoms = root_config.setconfig.getSetAtoms
8902                 sets = root_config.sets
8903                 args = []
8904                 for x in favorites:
8905                         if not isinstance(x, basestring):
8906                                 continue
8907                         if x in ("system", "world"):
8908                                 x = SETPREFIX + x
8909                         if x.startswith(SETPREFIX):
8910                                 s = x[len(SETPREFIX):]
8911                                 if s not in sets:
8912                                         continue
8913                                 if s in self._sets:
8914                                         continue
8915                                 # Recursively expand sets so that containment tests in
8916                                 # self._get_parent_sets() properly match atoms in nested
8917                                 # sets (like if world contains system).
8918                                 expanded_set = InternalPackageSet(
8919                                         initial_atoms=getSetAtoms(s))
8920                                 self._sets[s] = expanded_set
8921                                 args.append(SetArg(arg=x, set=expanded_set,
8922                                         root_config=root_config))
8923                         else:
8924                                 if not portage.isvalidatom(x):
8925                                         continue
8926                                 args.append(AtomArg(arg=x, atom=x,
8927                                         root_config=root_config))
8928
8929                 self._set_args(args)
8930                 return args
8931
8932         class UnsatisfiedResumeDep(portage.exception.PortageException):
8933                 """
8934                 A dependency of a resume list is not installed. This
8935                 can occur when a required package is dropped from the
8936                 merge list via --skipfirst.
8937                 """
8938                 def __init__(self, depgraph, value):
8939                         portage.exception.PortageException.__init__(self, value)
8940                         self.depgraph = depgraph
8941
8942         class _internal_exception(portage.exception.PortageException):
8943                 def __init__(self, value=""):
8944                         portage.exception.PortageException.__init__(self, value)
8945
8946         class _unknown_internal_error(_internal_exception):
8947                 """
8948                 Used by the depgraph internally to terminate graph creation.
8949                 The specific reason for the failure should have been dumped
8950                 to stderr, unfortunately, the exact reason for the failure
8951                 may not be known.
8952                 """
8953
8954         class _serialize_tasks_retry(_internal_exception):
8955                 """
8956                 This is raised by the _serialize_tasks() method when it needs to
8957                 be called again for some reason. The only case that it's currently
8958                 used for is when neglected dependencies need to be added to the
8959                 graph in order to avoid making a potentially unsafe decision.
8960                 """
8961
8962         class _dep_check_composite_db(portage.dbapi):
8963                 """
8964                 A dbapi-like interface that is optimized for use in dep_check() calls.
8965                 This is built on top of the existing depgraph package selection logic.
8966                 Some packages that have been added to the graph may be masked from this
8967                 view in order to influence the atom preference selection that occurs
8968                 via dep_check().
8969                 """
8970                 def __init__(self, depgraph, root):
8971                         portage.dbapi.__init__(self)
8972                         self._depgraph = depgraph
8973                         self._root = root
8974                         self._match_cache = {}
8975                         self._cpv_pkg_map = {}
8976
8977                 def _clear_cache(self):
8978                         self._match_cache.clear()
8979                         self._cpv_pkg_map.clear()
8980
8981                 def match(self, atom):
8982                         ret = self._match_cache.get(atom)
8983                         if ret is not None:
8984                                 return ret[:]
8985                         orig_atom = atom
8986                         if "/" not in atom:
8987                                 atom = self._dep_expand(atom)
8988                         pkg, existing = self._depgraph._select_package(self._root, atom)
8989                         if not pkg:
8990                                 ret = []
8991                         else:
8992                                 # Return the highest available from select_package() as well as
8993                                 # any matching slots in the graph db.
8994                                 slots = set()
8995                                 slots.add(pkg.metadata["SLOT"])
8996                                 atom_cp = portage.dep_getkey(atom)
8997                                 if pkg.cp.startswith("virtual/"):
8998                                         # For new-style virtual lookahead that occurs inside
8999                                         # dep_check(), examine all slots. This is needed
9000                                         # so that newer slots will not unnecessarily be pulled in
9001                                         # when a satisfying lower slot is already installed. For
9002                                         # example, if virtual/jdk-1.4 is satisfied via kaffe then
9003                                         # there's no need to pull in a newer slot to satisfy a
9004                                         # virtual/jdk dependency.
9005                                         for db, pkg_type, built, installed, db_keys in \
9006                                                 self._depgraph._filtered_trees[self._root]["dbs"]:
9007                                                 for cpv in db.match(atom):
9008                                                         if portage.cpv_getkey(cpv) != pkg.cp:
9009                                                                 continue
9010                                                         slots.add(db.aux_get(cpv, ["SLOT"])[0])
9011                                 ret = []
9012                                 if self._visible(pkg):
9013                                         self._cpv_pkg_map[pkg.cpv] = pkg
9014                                         ret.append(pkg.cpv)
9015                                 slots.remove(pkg.metadata["SLOT"])
9016                                 while slots:
9017                                         slot_atom = "%s:%s" % (atom_cp, slots.pop())
9018                                         pkg, existing = self._depgraph._select_package(
9019                                                 self._root, slot_atom)
9020                                         if not pkg:
9021                                                 continue
9022                                         if not self._visible(pkg):
9023                                                 continue
9024                                         self._cpv_pkg_map[pkg.cpv] = pkg
9025                                         ret.append(pkg.cpv)
9026                                 if ret:
9027                                         self._cpv_sort_ascending(ret)
9028                         self._match_cache[orig_atom] = ret
9029                         return ret[:]
9030
9031                 def _visible(self, pkg):
9032                         if pkg.installed and "selective" not in self._depgraph.myparams:
9033                                 try:
9034                                         arg = self._depgraph._iter_atoms_for_pkg(pkg).next()
9035                                 except (StopIteration, portage.exception.InvalidDependString):
9036                                         arg = None
9037                                 if arg:
9038                                         return False
9039                         if pkg.installed:
9040                                 try:
9041                                         if not visible(
9042                                                 self._depgraph.pkgsettings[pkg.root], pkg):
9043                                                 return False
9044                                 except portage.exception.InvalidDependString:
9045                                         pass
9046                         in_graph = self._depgraph._slot_pkg_map[
9047                                 self._root].get(pkg.slot_atom)
9048                         if in_graph is None:
9049                                 # Mask choices for packages which are not the highest visible
9050                                 # version within their slot (since they usually trigger slot
9051                                 # conflicts).
9052                                 highest_visible, in_graph = self._depgraph._select_package(
9053                                         self._root, pkg.slot_atom)
9054                                 if pkg != highest_visible:
9055                                         return False
9056                         elif in_graph != pkg:
9057                                 # Mask choices for packages that would trigger a slot
9058                                 # conflict with a previously selected package.
9059                                 return False
9060                         return True
9061
9062                 def _dep_expand(self, atom):
9063                         """
9064                         This is only needed for old installed packages that may
9065                         contain atoms that are not fully qualified with a specific
9066                         category. Emulate the cpv_expand() function that's used by
9067                         dbapi.match() in cases like this. If there are multiple
9068                         matches, it's often due to a new-style virtual that has
9069                         been added, so try to filter those out to avoid raising
9070                         a ValueError.
9071                         """
9072                         root_config = self._depgraph.roots[self._root]
9073                         orig_atom = atom
9074                         expanded_atoms = self._depgraph._dep_expand(root_config, atom)
9075                         if len(expanded_atoms) > 1:
9076                                 non_virtual_atoms = []
9077                                 for x in expanded_atoms:
9078                                         if not portage.dep_getkey(x).startswith("virtual/"):
9079                                                 non_virtual_atoms.append(x)
9080                                 if len(non_virtual_atoms) == 1:
9081                                         expanded_atoms = non_virtual_atoms
9082                         if len(expanded_atoms) > 1:
9083                                 # compatible with portage.cpv_expand()
9084                                 raise portage.exception.AmbiguousPackageName(
9085                                         [portage.dep_getkey(x) for x in expanded_atoms])
9086                         if expanded_atoms:
9087                                 atom = expanded_atoms[0]
9088                         else:
9089                                 null_atom = insert_category_into_atom(atom, "null")
9090                                 null_cp = portage.dep_getkey(null_atom)
9091                                 cat, atom_pn = portage.catsplit(null_cp)
9092                                 virts_p = root_config.settings.get_virts_p().get(atom_pn)
9093                                 if virts_p:
9094                                         # Allow the resolver to choose which virtual.
9095                                         atom = insert_category_into_atom(atom, "virtual")
9096                                 else:
9097                                         atom = insert_category_into_atom(atom, "null")
9098                         return atom
9099
9100                 def aux_get(self, cpv, wants):
9101                         metadata = self._cpv_pkg_map[cpv].metadata
9102                         return [metadata.get(x, "") for x in wants]
9103
9104 class RepoDisplay(object):
9105         def __init__(self, roots):
9106                 self._shown_repos = {}
9107                 self._unknown_repo = False
9108                 repo_paths = set()
9109                 for root_config in roots.itervalues():
9110                         portdir = root_config.settings.get("PORTDIR")
9111                         if portdir:
9112                                 repo_paths.add(portdir)
9113                         overlays = root_config.settings.get("PORTDIR_OVERLAY")
9114                         if overlays:
9115                                 repo_paths.update(overlays.split())
9116                 repo_paths = list(repo_paths)
9117                 self._repo_paths = repo_paths
9118                 self._repo_paths_real = [ os.path.realpath(repo_path) \
9119                         for repo_path in repo_paths ]
9120
9121                 # pre-allocate index for PORTDIR so that it always has index 0.
9122                 for root_config in roots.itervalues():
9123                         portdb = root_config.trees["porttree"].dbapi
9124                         portdir = portdb.porttree_root
9125                         if portdir:
9126                                 self.repoStr(portdir)
9127
9128         def repoStr(self, repo_path_real):
9129                 real_index = -1
9130                 if repo_path_real:
9131                         real_index = self._repo_paths_real.index(repo_path_real)
9132                 if real_index == -1:
9133                         s = "?"
9134                         self._unknown_repo = True
9135                 else:
9136                         shown_repos = self._shown_repos
9137                         repo_paths = self._repo_paths
9138                         repo_path = repo_paths[real_index]
9139                         index = shown_repos.get(repo_path)
9140                         if index is None:
9141                                 index = len(shown_repos)
9142                                 shown_repos[repo_path] = index
9143                         s = str(index)
9144                 return s
9145
9146         def __str__(self):
9147                 output = []
9148                 shown_repos = self._shown_repos
9149                 unknown_repo = self._unknown_repo
9150                 if shown_repos or self._unknown_repo:
9151                         output.append("Portage tree and overlays:\n")
9152                 show_repo_paths = list(shown_repos)
9153                 for repo_path, repo_index in shown_repos.iteritems():
9154                         show_repo_paths[repo_index] = repo_path
9155                 if show_repo_paths:
9156                         for index, repo_path in enumerate(show_repo_paths):
9157                                 output.append(" "+teal("["+str(index)+"]")+" %s\n" % repo_path)
9158                 if unknown_repo:
9159                         output.append(" "+teal("[?]") + \
9160                                 " indicates that the source repository could not be determined\n")
9161                 return "".join(output)
9162
9163 class PackageCounters(object):
9164
9165         def __init__(self):
9166                 self.upgrades   = 0
9167                 self.downgrades = 0
9168                 self.new        = 0
9169                 self.newslot    = 0
9170                 self.reinst     = 0
9171                 self.uninst     = 0
9172                 self.blocks     = 0
9173                 self.blocks_satisfied         = 0
9174                 self.totalsize  = 0
9175                 self.restrict_fetch           = 0
9176                 self.restrict_fetch_satisfied = 0
9177                 self.interactive              = 0
9178
9179         def __str__(self):
9180                 total_installs = self.upgrades + self.downgrades + self.newslot + self.new + self.reinst
9181                 myoutput = []
9182                 details = []
9183                 myoutput.append("Total: %s package" % total_installs)
9184                 if total_installs != 1:
9185                         myoutput.append("s")
9186                 if total_installs != 0:
9187                         myoutput.append(" (")
9188                 if self.upgrades > 0:
9189                         details.append("%s upgrade" % self.upgrades)
9190                         if self.upgrades > 1:
9191                                 details[-1] += "s"
9192                 if self.downgrades > 0:
9193                         details.append("%s downgrade" % self.downgrades)
9194                         if self.downgrades > 1:
9195                                 details[-1] += "s"
9196                 if self.new > 0:
9197                         details.append("%s new" % self.new)
9198                 if self.newslot > 0:
9199                         details.append("%s in new slot" % self.newslot)
9200                         if self.newslot > 1:
9201                                 details[-1] += "s"
9202                 if self.reinst > 0:
9203                         details.append("%s reinstall" % self.reinst)
9204                         if self.reinst > 1:
9205                                 details[-1] += "s"
9206                 if self.uninst > 0:
9207                         details.append("%s uninstall" % self.uninst)
9208                         if self.uninst > 1:
9209                                 details[-1] += "s"
9210                 if self.interactive > 0:
9211                         details.append("%s %s" % (self.interactive,
9212                                 colorize("WARN", "interactive")))
9213                 myoutput.append(", ".join(details))
9214                 if total_installs != 0:
9215                         myoutput.append(")")
9216                 myoutput.append(", Size of downloads: %s" % format_size(self.totalsize))
9217                 if self.restrict_fetch:
9218                         myoutput.append("\nFetch Restriction: %s package" % \
9219                                 self.restrict_fetch)
9220                         if self.restrict_fetch > 1:
9221                                 myoutput.append("s")
9222                 if self.restrict_fetch_satisfied < self.restrict_fetch:
9223                         myoutput.append(bad(" (%s unsatisfied)") % \
9224                                 (self.restrict_fetch - self.restrict_fetch_satisfied))
9225                 if self.blocks > 0:
9226                         myoutput.append("\nConflict: %s block" % \
9227                                 self.blocks)
9228                         if self.blocks > 1:
9229                                 myoutput.append("s")
9230                         if self.blocks_satisfied < self.blocks:
9231                                 myoutput.append(bad(" (%s unsatisfied)") % \
9232                                         (self.blocks - self.blocks_satisfied))
9233                 return "".join(myoutput)
9234
9235 class UseFlagDisplay(object):
9236
9237         __slots__ = ('name', 'enabled', 'forced')
9238
9239         def __init__(self, name, enabled, forced):
9240                 self.name = name
9241                 self.enabled = enabled
9242                 self.forced = forced
9243
9244         def __str__(self):
9245                 s = self.name
9246                 if self.enabled:
9247                         s = red(s)
9248                 else:
9249                         s = '-' + s
9250                         s = blue(s)
9251                 if self.forced:
9252                         s = '(%s)' % s
9253                 return s
9254
9255         @classmethod
9256         def cmp_combined(cls, a, b):
9257                 """
9258                 Sort by name, combining enabled and disabled flags.
9259                 """
9260                 return (a.name > b.name) - (a.name < b.name)
9261
9262         @classmethod
9263         def cmp_separated(cls, a, b):
9264                 """
9265                 Sort by name, separating enabled flags from disabled flags.
9266                 """
9267                 enabled_diff = b.enabled - a.enabled
9268                 if enabled_diff:
9269                         return enabled_diff
9270                 return (a.name > b.name) - (a.name < b.name)
9271
9272 class PollSelectAdapter(PollConstants):
9273
9274         """
9275         Use select to emulate a poll object, for
9276         systems that don't support poll().
9277         """
9278
9279         def __init__(self):
9280                 self._registered = {}
9281                 self._select_args = [[], [], []]
9282
9283         def register(self, fd, *args):
9284                 """
9285                 Only POLLIN is currently supported!
9286                 """
9287                 if len(args) > 1:
9288                         raise TypeError(
9289                                 "register expected at most 2 arguments, got " + \
9290                                 repr(1 + len(args)))
9291
9292                 eventmask = PollConstants.POLLIN | \
9293                         PollConstants.POLLPRI | PollConstants.POLLOUT
9294                 if args:
9295                         eventmask = args[0]
9296
9297                 self._registered[fd] = eventmask
9298                 self._select_args = None
9299
9300         def unregister(self, fd):
9301                 self._select_args = None
9302                 del self._registered[fd]
9303
9304         def poll(self, *args):
9305                 if len(args) > 1:
9306                         raise TypeError(
9307                                 "poll expected at most 2 arguments, got " + \
9308                                 repr(1 + len(args)))
9309
9310                 timeout = None
9311                 if args:
9312                         timeout = args[0]
9313
9314                 select_args = self._select_args
9315                 if select_args is None:
9316                         select_args = [self._registered.keys(), [], []]
9317
9318                 if timeout is not None:
9319                         select_args = select_args[:]
9320                         # Translate poll() timeout args to select() timeout args:
9321                         #
9322                         #          | units        | value(s) for indefinite block
9323                         # ---------|--------------|------------------------------
9324                         #   poll   | milliseconds | omitted, negative, or None
9325                         # ---------|--------------|------------------------------
9326                         #   select | seconds      | omitted
9327                         # ---------|--------------|------------------------------
9328
9329                         if timeout is not None and timeout < 0:
9330                                 timeout = None
9331                         if timeout is not None:
9332                                 select_args.append(timeout / 1000)
9333
9334                 select_events = select.select(*select_args)
9335                 poll_events = []
9336                 for fd in select_events[0]:
9337                         poll_events.append((fd, PollConstants.POLLIN))
9338                 return poll_events
9339
9340 class SequentialTaskQueue(SlotObject):
9341
9342         __slots__ = ("max_jobs", "running_tasks") + \
9343                 ("_dirty", "_scheduling", "_task_queue")
9344
9345         def __init__(self, **kwargs):
9346                 SlotObject.__init__(self, **kwargs)
9347                 self._task_queue = deque()
9348                 self.running_tasks = set()
9349                 if self.max_jobs is None:
9350                         self.max_jobs = 1
9351                 self._dirty = True
9352
9353         def add(self, task):
9354                 self._task_queue.append(task)
9355                 self._dirty = True
9356
9357         def addFront(self, task):
9358                 self._task_queue.appendleft(task)
9359                 self._dirty = True
9360
9361         def schedule(self):
9362
9363                 if not self._dirty:
9364                         return False
9365
9366                 if not self:
9367                         return False
9368
9369                 if self._scheduling:
9370                         # Ignore any recursive schedule() calls triggered via
9371                         # self._task_exit().
9372                         return False
9373
9374                 self._scheduling = True
9375
9376                 task_queue = self._task_queue
9377                 running_tasks = self.running_tasks
9378                 max_jobs = self.max_jobs
9379                 state_changed = False
9380
9381                 while task_queue and \
9382                         (max_jobs is True or len(running_tasks) < max_jobs):
9383                         task = task_queue.popleft()
9384                         cancelled = getattr(task, "cancelled", None)
9385                         if not cancelled:
9386                                 running_tasks.add(task)
9387                                 task.addExitListener(self._task_exit)
9388                                 task.start()
9389                         state_changed = True
9390
9391                 self._dirty = False
9392                 self._scheduling = False
9393
9394                 return state_changed
9395
9396         def _task_exit(self, task):
9397                 """
9398                 Since we can always rely on exit listeners being called, the set of
9399                 running tasks is always pruned automatically and there is never any need
9400                 to actively prune it.
9401                 """
9402                 self.running_tasks.remove(task)
9403                 if self._task_queue:
9404                         self._dirty = True
9405
9406         def clear(self):
9407                 self._task_queue.clear()
9408                 running_tasks = self.running_tasks
9409                 while running_tasks:
9410                         task = running_tasks.pop()
9411                         task.removeExitListener(self._task_exit)
9412                         task.cancel()
9413                 self._dirty = False
9414
9415         def __nonzero__(self):
9416                 return bool(self._task_queue or self.running_tasks)
9417
9418         def __len__(self):
9419                 return len(self._task_queue) + len(self.running_tasks)
9420
9421 _can_poll_device = None
9422
9423 def can_poll_device():
9424         """
9425         Test if it's possible to use poll() on a device such as a pty. This
9426         is known to fail on Darwin.
9427         @rtype: bool
9428         @returns: True if poll() on a device succeeds, False otherwise.
9429         """
9430
9431         global _can_poll_device
9432         if _can_poll_device is not None:
9433                 return _can_poll_device
9434
9435         if not hasattr(select, "poll"):
9436                 _can_poll_device = False
9437                 return _can_poll_device
9438
9439         try:
9440                 dev_null = open('/dev/null', 'rb')
9441         except IOError:
9442                 _can_poll_device = False
9443                 return _can_poll_device
9444
9445         p = select.poll()
9446         p.register(dev_null.fileno(), PollConstants.POLLIN)
9447
9448         invalid_request = False
9449         for f, event in p.poll():
9450                 if event & PollConstants.POLLNVAL:
9451                         invalid_request = True
9452                         break
9453         dev_null.close()
9454
9455         _can_poll_device = not invalid_request
9456         return _can_poll_device
9457
9458 def create_poll_instance():
9459         """
9460         Create an instance of select.poll, or an instance of
9461         PollSelectAdapter there is no poll() implementation or
9462         it is broken somehow.
9463         """
9464         if can_poll_device():
9465                 return select.poll()
9466         return PollSelectAdapter()
9467
9468 getloadavg = getattr(os, "getloadavg", None)
9469 if getloadavg is None:
9470         def getloadavg():
9471                 """
9472                 Uses /proc/loadavg to emulate os.getloadavg().
9473                 Raises OSError if the load average was unobtainable.
9474                 """
9475                 try:
9476                         loadavg_str = open('/proc/loadavg').readline()
9477                 except IOError:
9478                         # getloadavg() is only supposed to raise OSError, so convert
9479                         raise OSError('unknown')
9480                 loadavg_split = loadavg_str.split()
9481                 if len(loadavg_split) < 3:
9482                         raise OSError('unknown')
9483                 loadavg_floats = []
9484                 for i in xrange(3):
9485                         try:
9486                                 loadavg_floats.append(float(loadavg_split[i]))
9487                         except ValueError:
9488                                 raise OSError('unknown')
9489                 return tuple(loadavg_floats)
9490
9491 class PollScheduler(object):
9492
9493         class _sched_iface_class(SlotObject):
9494                 __slots__ = ("register", "schedule", "unregister")
9495
9496         def __init__(self):
9497                 self._max_jobs = 1
9498                 self._max_load = None
9499                 self._jobs = 0
9500                 self._poll_event_queue = []
9501                 self._poll_event_handlers = {}
9502                 self._poll_event_handler_ids = {}
9503                 # Increment id for each new handler.
9504                 self._event_handler_id = 0
9505                 self._poll_obj = create_poll_instance()
9506                 self._scheduling = False
9507
9508         def _schedule(self):
9509                 """
9510                 Calls _schedule_tasks() and automatically returns early from
9511                 any recursive calls to this method that the _schedule_tasks()
9512                 call might trigger. This makes _schedule() safe to call from
9513                 inside exit listeners.
9514                 """
9515                 if self._scheduling:
9516                         return False
9517                 self._scheduling = True
9518                 try:
9519                         return self._schedule_tasks()
9520                 finally:
9521                         self._scheduling = False
9522
9523         def _running_job_count(self):
9524                 return self._jobs
9525
9526         def _can_add_job(self):
9527                 max_jobs = self._max_jobs
9528                 max_load = self._max_load
9529
9530                 if self._max_jobs is not True and \
9531                         self._running_job_count() >= self._max_jobs:
9532                         return False
9533
9534                 if max_load is not None and \
9535                         (max_jobs is True or max_jobs > 1) and \
9536                         self._running_job_count() >= 1:
9537                         try:
9538                                 avg1, avg5, avg15 = getloadavg()
9539                         except OSError:
9540                                 return False
9541
9542                         if avg1 >= max_load:
9543                                 return False
9544
9545                 return True
9546
9547         def _poll(self, timeout=None):
9548                 """
9549                 All poll() calls pass through here. The poll events
9550                 are added directly to self._poll_event_queue.
9551                 In order to avoid endless blocking, this raises
9552                 StopIteration if timeout is None and there are
9553                 no file descriptors to poll.
9554                 """
9555                 if not self._poll_event_handlers:
9556                         self._schedule()
9557                         if timeout is None and \
9558                                 not self._poll_event_handlers:
9559                                 raise StopIteration(
9560                                         "timeout is None and there are no poll() event handlers")
9561
9562                 # The following error is known to occur with Linux kernel versions
9563                 # less than 2.6.24:
9564                 #
9565                 #   select.error: (4, 'Interrupted system call')
9566                 #
9567                 # This error has been observed after a SIGSTOP, followed by SIGCONT.
9568                 # Treat it similar to EAGAIN if timeout is None, otherwise just return
9569                 # without any events.
9570                 while True:
9571                         try:
9572                                 self._poll_event_queue.extend(self._poll_obj.poll(timeout))
9573                                 break
9574                         except select.error, e:
9575                                 writemsg_level("\n!!! select error: %s\n" % (e,),
9576                                         level=logging.ERROR, noiselevel=-1)
9577                                 del e
9578                                 if timeout is not None:
9579                                         break
9580
9581         def _next_poll_event(self, timeout=None):
9582                 """
9583                 Since the _schedule_wait() loop is called by event
9584                 handlers from _poll_loop(), maintain a central event
9585                 queue for both of them to share events from a single
9586                 poll() call. In order to avoid endless blocking, this
9587                 raises StopIteration if timeout is None and there are
9588                 no file descriptors to poll.
9589                 """
9590                 if not self._poll_event_queue:
9591                         self._poll(timeout)
9592                 return self._poll_event_queue.pop()
9593
9594         def _poll_loop(self):
9595
9596                 event_handlers = self._poll_event_handlers
9597                 event_handled = False
9598
9599                 try:
9600                         while event_handlers:
9601                                 f, event = self._next_poll_event()
9602                                 handler, reg_id = event_handlers[f]
9603                                 handler(f, event)
9604                                 event_handled = True
9605                 except StopIteration:
9606                         event_handled = True
9607
9608                 if not event_handled:
9609                         raise AssertionError("tight loop")
9610
9611         def _schedule_yield(self):
9612                 """
9613                 Schedule for a short period of time chosen by the scheduler based
9614                 on internal state. Synchronous tasks should call this periodically
9615                 in order to allow the scheduler to service pending poll events. The
9616                 scheduler will call poll() exactly once, without blocking, and any
9617                 resulting poll events will be serviced.
9618                 """
9619                 event_handlers = self._poll_event_handlers
9620                 events_handled = 0
9621
9622                 if not event_handlers:
9623                         return bool(events_handled)
9624
9625                 if not self._poll_event_queue:
9626                         self._poll(0)
9627
9628                 try:
9629                         while event_handlers and self._poll_event_queue:
9630                                 f, event = self._next_poll_event()
9631                                 handler, reg_id = event_handlers[f]
9632                                 handler(f, event)
9633                                 events_handled += 1
9634                 except StopIteration:
9635                         events_handled += 1
9636
9637                 return bool(events_handled)
9638
9639         def _register(self, f, eventmask, handler):
9640                 """
9641                 @rtype: Integer
9642                 @return: A unique registration id, for use in schedule() or
9643                         unregister() calls.
9644                 """
9645                 if f in self._poll_event_handlers:
9646                         raise AssertionError("fd %d is already registered" % f)
9647                 self._event_handler_id += 1
9648                 reg_id = self._event_handler_id
9649                 self._poll_event_handler_ids[reg_id] = f
9650                 self._poll_event_handlers[f] = (handler, reg_id)
9651                 self._poll_obj.register(f, eventmask)
9652                 return reg_id
9653
9654         def _unregister(self, reg_id):
9655                 f = self._poll_event_handler_ids[reg_id]
9656                 self._poll_obj.unregister(f)
9657                 del self._poll_event_handlers[f]
9658                 del self._poll_event_handler_ids[reg_id]
9659
9660         def _schedule_wait(self, wait_ids):
9661                 """
9662                 Schedule until wait_id is not longer registered
9663                 for poll() events.
9664                 @type wait_id: int
9665                 @param wait_id: a task id to wait for
9666                 """
9667                 event_handlers = self._poll_event_handlers
9668                 handler_ids = self._poll_event_handler_ids
9669                 event_handled = False
9670
9671                 if isinstance(wait_ids, int):
9672                         wait_ids = frozenset([wait_ids])
9673
9674                 try:
9675                         while wait_ids.intersection(handler_ids):
9676                                 f, event = self._next_poll_event()
9677                                 handler, reg_id = event_handlers[f]
9678                                 handler(f, event)
9679                                 event_handled = True
9680                 except StopIteration:
9681                         event_handled = True
9682
9683                 return event_handled
9684
9685 class QueueScheduler(PollScheduler):
9686
9687         """
9688         Add instances of SequentialTaskQueue and then call run(). The
9689         run() method returns when no tasks remain.
9690         """
9691
9692         def __init__(self, max_jobs=None, max_load=None):
9693                 PollScheduler.__init__(self)
9694
9695                 if max_jobs is None:
9696                         max_jobs = 1
9697
9698                 self._max_jobs = max_jobs
9699                 self._max_load = max_load
9700                 self.sched_iface = self._sched_iface_class(
9701                         register=self._register,
9702                         schedule=self._schedule_wait,
9703                         unregister=self._unregister)
9704
9705                 self._queues = []
9706                 self._schedule_listeners = []
9707
9708         def add(self, q):
9709                 self._queues.append(q)
9710
9711         def remove(self, q):
9712                 self._queues.remove(q)
9713
9714         def run(self):
9715
9716                 while self._schedule():
9717                         self._poll_loop()
9718
9719                 while self._running_job_count():
9720                         self._poll_loop()
9721
9722         def _schedule_tasks(self):
9723                 """
9724                 @rtype: bool
9725                 @returns: True if there may be remaining tasks to schedule,
9726                         False otherwise.
9727                 """
9728                 while self._can_add_job():
9729                         n = self._max_jobs - self._running_job_count()
9730                         if n < 1:
9731                                 break
9732
9733                         if not self._start_next_job(n):
9734                                 return False
9735
9736                 for q in self._queues:
9737                         if q:
9738                                 return True
9739                 return False
9740
9741         def _running_job_count(self):
9742                 job_count = 0
9743                 for q in self._queues:
9744                         job_count += len(q.running_tasks)
9745                 self._jobs = job_count
9746                 return job_count
9747
9748         def _start_next_job(self, n=1):
9749                 started_count = 0
9750                 for q in self._queues:
9751                         initial_job_count = len(q.running_tasks)
9752                         q.schedule()
9753                         final_job_count = len(q.running_tasks)
9754                         if final_job_count > initial_job_count:
9755                                 started_count += (final_job_count - initial_job_count)
9756                         if started_count >= n:
9757                                 break
9758                 return started_count
9759
9760 class TaskScheduler(object):
9761
9762         """
9763         A simple way to handle scheduling of AsynchrousTask instances. Simply
9764         add tasks and call run(). The run() method returns when no tasks remain.
9765         """
9766
9767         def __init__(self, max_jobs=None, max_load=None):
9768                 self._queue = SequentialTaskQueue(max_jobs=max_jobs)
9769                 self._scheduler = QueueScheduler(
9770                         max_jobs=max_jobs, max_load=max_load)
9771                 self.sched_iface = self._scheduler.sched_iface
9772                 self.run = self._scheduler.run
9773                 self._scheduler.add(self._queue)
9774
9775         def add(self, task):
9776                 self._queue.add(task)
9777
9778 class JobStatusDisplay(object):
9779
9780         _bound_properties = ("curval", "failed", "running")
9781         _jobs_column_width = 48
9782
9783         # Don't update the display unless at least this much
9784         # time has passed, in units of seconds.
9785         _min_display_latency = 2
9786
9787         _default_term_codes = {
9788                 'cr'  : '\r',
9789                 'el'  : '\x1b[K',
9790                 'nel' : '\n',
9791         }
9792
9793         _termcap_name_map = {
9794                 'carriage_return' : 'cr',
9795                 'clr_eol'         : 'el',
9796                 'newline'         : 'nel',
9797         }
9798
9799         def __init__(self, out=sys.stdout, quiet=False):
9800                 object.__setattr__(self, "out", out)
9801                 object.__setattr__(self, "quiet", quiet)
9802                 object.__setattr__(self, "maxval", 0)
9803                 object.__setattr__(self, "merges", 0)
9804                 object.__setattr__(self, "_changed", False)
9805                 object.__setattr__(self, "_displayed", False)
9806                 object.__setattr__(self, "_last_display_time", 0)
9807                 object.__setattr__(self, "width", 80)
9808                 self.reset()
9809
9810                 isatty = hasattr(out, "isatty") and out.isatty()
9811                 object.__setattr__(self, "_isatty", isatty)
9812                 if not isatty or not self._init_term():
9813                         term_codes = {}
9814                         for k, capname in self._termcap_name_map.iteritems():
9815                                 term_codes[k] = self._default_term_codes[capname]
9816                         object.__setattr__(self, "_term_codes", term_codes)
9817                 encoding = sys.getdefaultencoding()
9818                 for k, v in self._term_codes.items():
9819                         if not isinstance(v, basestring):
9820                                 self._term_codes[k] = v.decode(encoding, 'replace')
9821
9822         def _init_term(self):
9823                 """
9824                 Initialize term control codes.
9825                 @rtype: bool
9826                 @returns: True if term codes were successfully initialized,
9827                         False otherwise.
9828                 """
9829
9830                 term_type = os.environ.get("TERM", "vt100")
9831                 tigetstr = None
9832
9833                 try:
9834                         import curses
9835                         try:
9836                                 curses.setupterm(term_type, self.out.fileno())
9837                                 tigetstr = curses.tigetstr
9838                         except curses.error:
9839                                 pass
9840                 except ImportError:
9841                         pass
9842
9843                 if tigetstr is None:
9844                         return False
9845
9846                 term_codes = {}
9847                 for k, capname in self._termcap_name_map.iteritems():
9848                         code = tigetstr(capname)
9849                         if code is None:
9850                                 code = self._default_term_codes[capname]
9851                         term_codes[k] = code
9852                 object.__setattr__(self, "_term_codes", term_codes)
9853                 return True
9854
9855         def _format_msg(self, msg):
9856                 return ">>> %s" % msg
9857
9858         def _erase(self):
9859                 self.out.write(
9860                         self._term_codes['carriage_return'] + \
9861                         self._term_codes['clr_eol'])
9862                 self.out.flush()
9863                 self._displayed = False
9864
9865         def _display(self, line):
9866                 self.out.write(line)
9867                 self.out.flush()
9868                 self._displayed = True
9869
9870         def _update(self, msg):
9871
9872                 out = self.out
9873                 if not self._isatty:
9874                         out.write(self._format_msg(msg) + self._term_codes['newline'])
9875                         self.out.flush()
9876                         self._displayed = True
9877                         return
9878
9879                 if self._displayed:
9880                         self._erase()
9881
9882                 self._display(self._format_msg(msg))
9883
9884         def displayMessage(self, msg):
9885
9886                 was_displayed = self._displayed
9887
9888                 if self._isatty and self._displayed:
9889                         self._erase()
9890
9891                 self.out.write(self._format_msg(msg) + self._term_codes['newline'])
9892                 self.out.flush()
9893                 self._displayed = False
9894
9895                 if was_displayed:
9896                         self._changed = True
9897                         self.display()
9898
9899         def reset(self):
9900                 self.maxval = 0
9901                 self.merges = 0
9902                 for name in self._bound_properties:
9903                         object.__setattr__(self, name, 0)
9904
9905                 if self._displayed:
9906                         self.out.write(self._term_codes['newline'])
9907                         self.out.flush()
9908                         self._displayed = False
9909
9910         def __setattr__(self, name, value):
9911                 old_value = getattr(self, name)
9912                 if value == old_value:
9913                         return
9914                 object.__setattr__(self, name, value)
9915                 if name in self._bound_properties:
9916                         self._property_change(name, old_value, value)
9917
9918         def _property_change(self, name, old_value, new_value):
9919                 self._changed = True
9920                 self.display()
9921
9922         def _load_avg_str(self):
9923                 try:
9924                         avg = getloadavg()
9925                 except OSError:
9926                         return 'unknown'
9927
9928                 max_avg = max(avg)
9929
9930                 if max_avg < 10:
9931                         digits = 2
9932                 elif max_avg < 100:
9933                         digits = 1
9934                 else:
9935                         digits = 0
9936
9937                 return ", ".join(("%%.%df" % digits ) % x for x in avg)
9938
9939         def display(self):
9940                 """
9941                 Display status on stdout, but only if something has
9942                 changed since the last call.
9943                 """
9944
9945                 if self.quiet:
9946                         return
9947
9948                 current_time = time.time()
9949                 time_delta = current_time - self._last_display_time
9950                 if self._displayed and \
9951                         not self._changed:
9952                         if not self._isatty:
9953                                 return
9954                         if time_delta < self._min_display_latency:
9955                                 return
9956
9957                 self._last_display_time = current_time
9958                 self._changed = False
9959                 self._display_status()
9960
9961         def _display_status(self):
9962                 # Don't use len(self._completed_tasks) here since that also
9963                 # can include uninstall tasks.
9964                 curval_str = str(self.curval)
9965                 maxval_str = str(self.maxval)
9966                 running_str = str(self.running)
9967                 failed_str = str(self.failed)
9968                 load_avg_str = self._load_avg_str()
9969
9970                 color_output = StringIO()
9971                 plain_output = StringIO()
9972                 style_file = portage.output.ConsoleStyleFile(color_output)
9973                 style_file.write_listener = plain_output
9974                 style_writer = portage.output.StyleWriter(file=style_file, maxcol=9999)
9975                 style_writer.style_listener = style_file.new_styles
9976                 f = formatter.AbstractFormatter(style_writer)
9977
9978                 number_style = "INFORM"
9979                 f.add_literal_data("Jobs: ")
9980                 f.push_style(number_style)
9981                 f.add_literal_data(curval_str)
9982                 f.pop_style()
9983                 f.add_literal_data(" of ")
9984                 f.push_style(number_style)
9985                 f.add_literal_data(maxval_str)
9986                 f.pop_style()
9987                 f.add_literal_data(" complete")
9988
9989                 if self.running:
9990                         f.add_literal_data(", ")
9991                         f.push_style(number_style)
9992                         f.add_literal_data(running_str)
9993                         f.pop_style()
9994                         f.add_literal_data(" running")
9995
9996                 if self.failed:
9997                         f.add_literal_data(", ")
9998                         f.push_style(number_style)
9999                         f.add_literal_data(failed_str)
10000                         f.pop_style()
10001                         f.add_literal_data(" failed")
10002
10003                 padding = self._jobs_column_width - len(plain_output.getvalue())
10004                 if padding > 0:
10005                         f.add_literal_data(padding * " ")
10006
10007                 f.add_literal_data("Load avg: ")
10008                 f.add_literal_data(load_avg_str)
10009
10010                 # Truncate to fit width, to avoid making the terminal scroll if the
10011                 # line overflows (happens when the load average is large).
10012                 plain_output = plain_output.getvalue()
10013                 if self._isatty and len(plain_output) > self.width:
10014                         # Use plain_output here since it's easier to truncate
10015                         # properly than the color output which contains console
10016                         # color codes.
10017                         self._update(plain_output[:self.width])
10018                 else:
10019                         self._update(color_output.getvalue())
10020
10021                 xtermTitle(" ".join(plain_output.split()))
10022
10023 class Scheduler(PollScheduler):
10024
10025         _opts_ignore_blockers = \
10026                 frozenset(["--buildpkgonly",
10027                 "--fetchonly", "--fetch-all-uri",
10028                 "--nodeps", "--pretend"])
10029
10030         _opts_no_background = \
10031                 frozenset(["--pretend",
10032                 "--fetchonly", "--fetch-all-uri"])
10033
10034         _opts_no_restart = frozenset(["--buildpkgonly",
10035                 "--fetchonly", "--fetch-all-uri", "--pretend"])
10036
10037         _bad_resume_opts = set(["--ask", "--changelog",
10038                 "--resume", "--skipfirst"])
10039
10040         _fetch_log = "/var/log/emerge-fetch.log"
10041
10042         class _iface_class(SlotObject):
10043                 __slots__ = ("dblinkEbuildPhase", "dblinkDisplayMerge",
10044                         "dblinkElog", "dblinkEmergeLog", "fetch", "register", "schedule",
10045                         "scheduleSetup", "scheduleUnpack", "scheduleYield",
10046                         "unregister")
10047
10048         class _fetch_iface_class(SlotObject):
10049                 __slots__ = ("log_file", "schedule")
10050
10051         _task_queues_class = slot_dict_class(
10052                 ("merge", "jobs", "fetch", "unpack"), prefix="")
10053
10054         class _build_opts_class(SlotObject):
10055                 __slots__ = ("buildpkg", "buildpkgonly",
10056                         "fetch_all_uri", "fetchonly", "pretend")
10057
10058         class _binpkg_opts_class(SlotObject):
10059                 __slots__ = ("fetchonly", "getbinpkg", "pretend")
10060
10061         class _pkg_count_class(SlotObject):
10062                 __slots__ = ("curval", "maxval")
10063
10064         class _emerge_log_class(SlotObject):
10065                 __slots__ = ("xterm_titles",)
10066
10067                 def log(self, *pargs, **kwargs):
10068                         if not self.xterm_titles:
10069                                 # Avoid interference with the scheduler's status display.
10070                                 kwargs.pop("short_msg", None)
10071                         emergelog(self.xterm_titles, *pargs, **kwargs)
10072
10073         class _failed_pkg(SlotObject):
10074                 __slots__ = ("build_dir", "build_log", "pkg", "returncode")
10075
10076         class _ConfigPool(object):
10077                 """Interface for a task to temporarily allocate a config
10078                 instance from a pool. This allows a task to be constructed
10079                 long before the config instance actually becomes needed, like
10080                 when prefetchers are constructed for the whole merge list."""
10081                 __slots__ = ("_root", "_allocate", "_deallocate")
10082                 def __init__(self, root, allocate, deallocate):
10083                         self._root = root
10084                         self._allocate = allocate
10085                         self._deallocate = deallocate
10086                 def allocate(self):
10087                         return self._allocate(self._root)
10088                 def deallocate(self, settings):
10089                         self._deallocate(settings)
10090
10091         class _unknown_internal_error(portage.exception.PortageException):
10092                 """
10093                 Used internally to terminate scheduling. The specific reason for
10094                 the failure should have been dumped to stderr.
10095                 """
10096                 def __init__(self, value=""):
10097                         portage.exception.PortageException.__init__(self, value)
10098
10099         def __init__(self, settings, trees, mtimedb, myopts,
10100                 spinner, mergelist, favorites, digraph):
10101                 PollScheduler.__init__(self)
10102                 self.settings = settings
10103                 self.target_root = settings["ROOT"]
10104                 self.trees = trees
10105                 self.myopts = myopts
10106                 self._spinner = spinner
10107                 self._mtimedb = mtimedb
10108                 self._mergelist = mergelist
10109                 self._favorites = favorites
10110                 self._args_set = InternalPackageSet(favorites)
10111                 self._build_opts = self._build_opts_class()
10112                 for k in self._build_opts.__slots__:
10113                         setattr(self._build_opts, k, "--" + k.replace("_", "-") in myopts)
10114                 self._binpkg_opts = self._binpkg_opts_class()
10115                 for k in self._binpkg_opts.__slots__:
10116                         setattr(self._binpkg_opts, k, "--" + k.replace("_", "-") in myopts)
10117
10118                 self.curval = 0
10119                 self._logger = self._emerge_log_class()
10120                 self._task_queues = self._task_queues_class()
10121                 for k in self._task_queues.allowed_keys:
10122                         setattr(self._task_queues, k,
10123                                 SequentialTaskQueue())
10124
10125                 # Holds merges that will wait to be executed when no builds are
10126                 # executing. This is useful for system packages since dependencies
10127                 # on system packages are frequently unspecified.
10128                 self._merge_wait_queue = []
10129                 # Holds merges that have been transfered from the merge_wait_queue to
10130                 # the actual merge queue. They are removed from this list upon
10131                 # completion. Other packages can start building only when this list is
10132                 # empty.
10133                 self._merge_wait_scheduled = []
10134
10135                 # Holds system packages and their deep runtime dependencies. Before
10136                 # being merged, these packages go to merge_wait_queue, to be merged
10137                 # when no other packages are building.
10138                 self._deep_system_deps = set()
10139
10140                 # Holds packages to merge which will satisfy currently unsatisfied
10141                 # deep runtime dependencies of system packages. If this is not empty
10142                 # then no parallel builds will be spawned until it is empty. This
10143                 # minimizes the possibility that a build will fail due to the system
10144                 # being in a fragile state. For example, see bug #259954.
10145                 self._unsatisfied_system_deps = set()
10146
10147                 self._status_display = JobStatusDisplay()
10148                 self._max_load = myopts.get("--load-average")
10149                 max_jobs = myopts.get("--jobs")
10150                 if max_jobs is None:
10151                         max_jobs = 1
10152                 self._set_max_jobs(max_jobs)
10153
10154                 # The root where the currently running
10155                 # portage instance is installed.
10156                 self._running_root = trees["/"]["root_config"]
10157                 self.edebug = 0
10158                 if settings.get("PORTAGE_DEBUG", "") == "1":
10159                         self.edebug = 1
10160                 self.pkgsettings = {}
10161                 self._config_pool = {}
10162                 self._blocker_db = {}
10163                 for root in trees:
10164                         self._config_pool[root] = []
10165                         self._blocker_db[root] = BlockerDB(trees[root]["root_config"])
10166
10167                 fetch_iface = self._fetch_iface_class(log_file=self._fetch_log,
10168                         schedule=self._schedule_fetch)
10169                 self._sched_iface = self._iface_class(
10170                         dblinkEbuildPhase=self._dblink_ebuild_phase,
10171                         dblinkDisplayMerge=self._dblink_display_merge,
10172                         dblinkElog=self._dblink_elog,
10173                         dblinkEmergeLog=self._dblink_emerge_log,
10174                         fetch=fetch_iface, register=self._register,
10175                         schedule=self._schedule_wait,
10176                         scheduleSetup=self._schedule_setup,
10177                         scheduleUnpack=self._schedule_unpack,
10178                         scheduleYield=self._schedule_yield,
10179                         unregister=self._unregister)
10180
10181                 self._prefetchers = weakref.WeakValueDictionary()
10182                 self._pkg_queue = []
10183                 self._completed_tasks = set()
10184
10185                 self._failed_pkgs = []
10186                 self._failed_pkgs_all = []
10187                 self._failed_pkgs_die_msgs = []
10188                 self._post_mod_echo_msgs = []
10189                 self._parallel_fetch = False
10190                 merge_count = len([x for x in mergelist \
10191                         if isinstance(x, Package) and x.operation == "merge"])
10192                 self._pkg_count = self._pkg_count_class(
10193                         curval=0, maxval=merge_count)
10194                 self._status_display.maxval = self._pkg_count.maxval
10195
10196                 # The load average takes some time to respond when new
10197                 # jobs are added, so we need to limit the rate of adding
10198                 # new jobs.
10199                 self._job_delay_max = 10
10200                 self._job_delay_factor = 1.0
10201                 self._job_delay_exp = 1.5
10202                 self._previous_job_start_time = None
10203
10204                 self._set_digraph(digraph)
10205
10206                 # This is used to memoize the _choose_pkg() result when
10207                 # no packages can be chosen until one of the existing
10208                 # jobs completes.
10209                 self._choose_pkg_return_early = False
10210
10211                 features = self.settings.features
10212                 if "parallel-fetch" in features and \
10213                         not ("--pretend" in self.myopts or \
10214                         "--fetch-all-uri" in self.myopts or \
10215                         "--fetchonly" in self.myopts):
10216                         if "distlocks" not in features:
10217                                 portage.writemsg(red("!!!")+"\n", noiselevel=-1)
10218                                 portage.writemsg(red("!!!")+" parallel-fetching " + \
10219                                         "requires the distlocks feature enabled"+"\n",
10220                                         noiselevel=-1)
10221                                 portage.writemsg(red("!!!")+" you have it disabled, " + \
10222                                         "thus parallel-fetching is being disabled"+"\n",
10223                                         noiselevel=-1)
10224                                 portage.writemsg(red("!!!")+"\n", noiselevel=-1)
10225                         elif len(mergelist) > 1:
10226                                 self._parallel_fetch = True
10227
10228                 if self._parallel_fetch:
10229                                 # clear out existing fetch log if it exists
10230                                 try:
10231                                         open(self._fetch_log, 'w')
10232                                 except EnvironmentError:
10233                                         pass
10234
10235                 self._running_portage = None
10236                 portage_match = self._running_root.trees["vartree"].dbapi.match(
10237                         portage.const.PORTAGE_PACKAGE_ATOM)
10238                 if portage_match:
10239                         cpv = portage_match.pop()
10240                         self._running_portage = self._pkg(cpv, "installed",
10241                                 self._running_root, installed=True)
10242
10243         def _poll(self, timeout=None):
10244                 self._schedule()
10245                 PollScheduler._poll(self, timeout=timeout)
10246
10247         def _set_max_jobs(self, max_jobs):
10248                 self._max_jobs = max_jobs
10249                 self._task_queues.jobs.max_jobs = max_jobs
10250
10251         def _background_mode(self):
10252                 """
10253                 Check if background mode is enabled and adjust states as necessary.
10254
10255                 @rtype: bool
10256                 @returns: True if background mode is enabled, False otherwise.
10257                 """
10258                 background = (self._max_jobs is True or \
10259                         self._max_jobs > 1 or "--quiet" in self.myopts) and \
10260                         not bool(self._opts_no_background.intersection(self.myopts))
10261
10262                 if background:
10263                         interactive_tasks = self._get_interactive_tasks()
10264                         if interactive_tasks:
10265                                 background = False
10266                                 writemsg_level(">>> Sending package output to stdio due " + \
10267                                         "to interactive package(s):\n",
10268                                         level=logging.INFO, noiselevel=-1)
10269                                 msg = [""]
10270                                 for pkg in interactive_tasks:
10271                                         pkg_str = "  " + colorize("INFORM", str(pkg.cpv))
10272                                         if pkg.root != "/":
10273                                                 pkg_str += " for " + pkg.root
10274                                         msg.append(pkg_str)
10275                                 msg.append("")
10276                                 writemsg_level("".join("%s\n" % (l,) for l in msg),
10277                                         level=logging.INFO, noiselevel=-1)
10278                                 if self._max_jobs is True or self._max_jobs > 1:
10279                                         self._set_max_jobs(1)
10280                                         writemsg_level(">>> Setting --jobs=1 due " + \
10281                                                 "to the above interactive package(s)\n",
10282                                                 level=logging.INFO, noiselevel=-1)
10283
10284                 self._status_display.quiet = \
10285                         not background or \
10286                         ("--quiet" in self.myopts and \
10287                         "--verbose" not in self.myopts)
10288
10289                 self._logger.xterm_titles = \
10290                         "notitles" not in self.settings.features and \
10291                         self._status_display.quiet
10292
10293                 return background
10294
10295         def _get_interactive_tasks(self):
10296                 from portage import flatten
10297                 from portage.dep import use_reduce, paren_reduce
10298                 interactive_tasks = []
10299                 for task in self._mergelist:
10300                         if not (isinstance(task, Package) and \
10301                                 task.operation == "merge"):
10302                                 continue
10303                         try:
10304                                 properties = flatten(use_reduce(paren_reduce(
10305                                         task.metadata["PROPERTIES"]), uselist=task.use.enabled))
10306                         except portage.exception.InvalidDependString, e:
10307                                 show_invalid_depstring_notice(task,
10308                                         task.metadata["PROPERTIES"], str(e))
10309                                 raise self._unknown_internal_error()
10310                         if "interactive" in properties:
10311                                 interactive_tasks.append(task)
10312                 return interactive_tasks
10313
10314         def _set_digraph(self, digraph):
10315                 if "--nodeps" in self.myopts or \
10316                         (self._max_jobs is not True and self._max_jobs < 2):
10317                         # save some memory
10318                         self._digraph = None
10319                         return
10320
10321                 self._digraph = digraph
10322                 self._find_system_deps()
10323                 self._prune_digraph()
10324                 self._prevent_builddir_collisions()
10325
10326         def _find_system_deps(self):
10327                 """
10328                 Find system packages and their deep runtime dependencies. Before being
10329                 merged, these packages go to merge_wait_queue, to be merged when no
10330                 other packages are building.
10331                 """
10332                 deep_system_deps = self._deep_system_deps
10333                 deep_system_deps.clear()
10334                 deep_system_deps.update(
10335                         _find_deep_system_runtime_deps(self._digraph))
10336                 deep_system_deps.difference_update([pkg for pkg in \
10337                         deep_system_deps if pkg.operation != "merge"])
10338
10339         def _prune_digraph(self):
10340                 """
10341                 Prune any root nodes that are irrelevant.
10342                 """
10343
10344                 graph = self._digraph
10345                 completed_tasks = self._completed_tasks
10346                 removed_nodes = set()
10347                 while True:
10348                         for node in graph.root_nodes():
10349                                 if not isinstance(node, Package) or \
10350                                         (node.installed and node.operation == "nomerge") or \
10351                                         node.onlydeps or \
10352                                         node in completed_tasks:
10353                                         removed_nodes.add(node)
10354                         if removed_nodes:
10355                                 graph.difference_update(removed_nodes)
10356                         if not removed_nodes:
10357                                 break
10358                         removed_nodes.clear()
10359
10360         def _prevent_builddir_collisions(self):
10361                 """
10362                 When building stages, sometimes the same exact cpv needs to be merged
10363                 to both $ROOTs. Add edges to the digraph in order to avoid collisions
10364                 in the builddir. Currently, normal file locks would be inappropriate
10365                 for this purpose since emerge holds all of it's build dir locks from
10366                 the main process.
10367                 """
10368                 cpv_map = {}
10369                 for pkg in self._mergelist:
10370                         if not isinstance(pkg, Package):
10371                                 # a satisfied blocker
10372                                 continue
10373                         if pkg.installed:
10374                                 continue
10375                         if pkg.cpv not in cpv_map:
10376                                 cpv_map[pkg.cpv] = [pkg]
10377                                 continue
10378                         for earlier_pkg in cpv_map[pkg.cpv]:
10379                                 self._digraph.add(earlier_pkg, pkg,
10380                                         priority=DepPriority(buildtime=True))
10381                         cpv_map[pkg.cpv].append(pkg)
10382
10383         class _pkg_failure(portage.exception.PortageException):
10384                 """
10385                 An instance of this class is raised by unmerge() when
10386                 an uninstallation fails.
10387                 """
10388                 status = 1
10389                 def __init__(self, *pargs):
10390                         portage.exception.PortageException.__init__(self, pargs)
10391                         if pargs:
10392                                 self.status = pargs[0]
10393
10394         def _schedule_fetch(self, fetcher):
10395                 """
10396                 Schedule a fetcher on the fetch queue, in order to
10397                 serialize access to the fetch log.
10398                 """
10399                 self._task_queues.fetch.addFront(fetcher)
10400
10401         def _schedule_setup(self, setup_phase):
10402                 """
10403                 Schedule a setup phase on the merge queue, in order to
10404                 serialize unsandboxed access to the live filesystem.
10405                 """
10406                 self._task_queues.merge.addFront(setup_phase)
10407                 self._schedule()
10408
10409         def _schedule_unpack(self, unpack_phase):
10410                 """
10411                 Schedule an unpack phase on the unpack queue, in order
10412                 to serialize $DISTDIR access for live ebuilds.
10413                 """
10414                 self._task_queues.unpack.add(unpack_phase)
10415
10416         def _find_blockers(self, new_pkg):
10417                 """
10418                 Returns a callable which should be called only when
10419                 the vdb lock has been acquired.
10420                 """
10421                 def get_blockers():
10422                         return self._find_blockers_with_lock(new_pkg, acquire_lock=0)
10423                 return get_blockers
10424
10425         def _find_blockers_with_lock(self, new_pkg, acquire_lock=0):
10426                 if self._opts_ignore_blockers.intersection(self.myopts):
10427                         return None
10428
10429                 # Call gc.collect() here to avoid heap overflow that
10430                 # triggers 'Cannot allocate memory' errors (reported
10431                 # with python-2.5).
10432                 import gc
10433                 gc.collect()
10434
10435                 blocker_db = self._blocker_db[new_pkg.root]
10436
10437                 blocker_dblinks = []
10438                 for blocking_pkg in blocker_db.findInstalledBlockers(
10439                         new_pkg, acquire_lock=acquire_lock):
10440                         if new_pkg.slot_atom == blocking_pkg.slot_atom:
10441                                 continue
10442                         if new_pkg.cpv == blocking_pkg.cpv:
10443                                 continue
10444                         blocker_dblinks.append(portage.dblink(
10445                                 blocking_pkg.category, blocking_pkg.pf, blocking_pkg.root,
10446                                 self.pkgsettings[blocking_pkg.root], treetype="vartree",
10447                                 vartree=self.trees[blocking_pkg.root]["vartree"]))
10448
10449                 gc.collect()
10450
10451                 return blocker_dblinks
10452
10453         def _dblink_pkg(self, pkg_dblink):
10454                 cpv = pkg_dblink.mycpv
10455                 type_name = RootConfig.tree_pkg_map[pkg_dblink.treetype]
10456                 root_config = self.trees[pkg_dblink.myroot]["root_config"]
10457                 installed = type_name == "installed"
10458                 return self._pkg(cpv, type_name, root_config, installed=installed)
10459
10460         def _append_to_log_path(self, log_path, msg):
10461                 f = open(log_path, 'a')
10462                 try:
10463                         f.write(msg)
10464                 finally:
10465                         f.close()
10466
10467         def _dblink_elog(self, pkg_dblink, phase, func, msgs):
10468
10469                 log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
10470                 log_file = None
10471                 out = sys.stdout
10472                 background = self._background
10473
10474                 if background and log_path is not None:
10475                         log_file = open(log_path, 'a')
10476                         out = log_file
10477
10478                 try:
10479                         for msg in msgs:
10480                                 func(msg, phase=phase, key=pkg_dblink.mycpv, out=out)
10481                 finally:
10482                         if log_file is not None:
10483                                 log_file.close()
10484
10485         def _dblink_emerge_log(self, msg):
10486                 self._logger.log(msg)
10487
10488         def _dblink_display_merge(self, pkg_dblink, msg, level=0, noiselevel=0):
10489                 log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
10490                 background = self._background
10491
10492                 if log_path is None:
10493                         if not (background and level < logging.WARN):
10494                                 portage.util.writemsg_level(msg,
10495                                         level=level, noiselevel=noiselevel)
10496                 else:
10497                         if not background:
10498                                 portage.util.writemsg_level(msg,
10499                                         level=level, noiselevel=noiselevel)
10500                         self._append_to_log_path(log_path, msg)
10501
10502         def _dblink_ebuild_phase(self,
10503                 pkg_dblink, pkg_dbapi, ebuild_path, phase):
10504                 """
10505                 Using this callback for merge phases allows the scheduler
10506                 to run while these phases execute asynchronously, and allows
10507                 the scheduler control output handling.
10508                 """
10509
10510                 scheduler = self._sched_iface
10511                 settings = pkg_dblink.settings
10512                 pkg = self._dblink_pkg(pkg_dblink)
10513                 background = self._background
10514                 log_path = settings.get("PORTAGE_LOG_FILE")
10515
10516                 ebuild_phase = EbuildPhase(background=background,
10517                         pkg=pkg, phase=phase, scheduler=scheduler,
10518                         settings=settings, tree=pkg_dblink.treetype)
10519                 ebuild_phase.start()
10520                 ebuild_phase.wait()
10521
10522                 return ebuild_phase.returncode
10523
10524         def _generate_digests(self):
10525                 """
10526                 Generate digests if necessary for --digests or FEATURES=digest.
10527                 In order to avoid interference, this must done before parallel
10528                 tasks are started.
10529                 """
10530
10531                 if '--fetchonly' in self.myopts:
10532                         return os.EX_OK
10533
10534                 digest = '--digest' in self.myopts
10535                 if not digest:
10536                         for pkgsettings in self.pkgsettings.itervalues():
10537                                 if 'digest' in pkgsettings.features:
10538                                         digest = True
10539                                         break
10540
10541                 if not digest:
10542                         return os.EX_OK
10543
10544                 for x in self._mergelist:
10545                         if not isinstance(x, Package) or \
10546                                 x.type_name != 'ebuild' or \
10547                                 x.operation != 'merge':
10548                                 continue
10549                         pkgsettings = self.pkgsettings[x.root]
10550                         if '--digest' not in self.myopts and \
10551                                 'digest' not in pkgsettings.features:
10552                                 continue
10553                         portdb = x.root_config.trees['porttree'].dbapi
10554                         ebuild_path = portdb.findname(x.cpv)
10555                         if not ebuild_path:
10556                                 writemsg_level(
10557                                         "!!! Could not locate ebuild for '%s'.\n" \
10558                                         % x.cpv, level=logging.ERROR, noiselevel=-1)
10559                                 return 1
10560                         pkgsettings['O'] = os.path.dirname(ebuild_path)
10561                         if not portage.digestgen([], pkgsettings, myportdb=portdb):
10562                                 writemsg_level(
10563                                         "!!! Unable to generate manifest for '%s'.\n" \
10564                                         % x.cpv, level=logging.ERROR, noiselevel=-1)
10565                                 return 1
10566
10567                 return os.EX_OK
10568
10569         def _check_manifests(self):
10570                 # Verify all the manifests now so that the user is notified of failure
10571                 # as soon as possible.
10572                 if "strict" not in self.settings.features or \
10573                         "--fetchonly" in self.myopts or \
10574                         "--fetch-all-uri" in self.myopts:
10575                         return os.EX_OK
10576
10577                 shown_verifying_msg = False
10578                 quiet_settings = {}
10579                 for myroot, pkgsettings in self.pkgsettings.iteritems():
10580                         quiet_config = portage.config(clone=pkgsettings)
10581                         quiet_config["PORTAGE_QUIET"] = "1"
10582                         quiet_config.backup_changes("PORTAGE_QUIET")
10583                         quiet_settings[myroot] = quiet_config
10584                         del quiet_config
10585
10586                 for x in self._mergelist:
10587                         if not isinstance(x, Package) or \
10588                                 x.type_name != "ebuild":
10589                                 continue
10590
10591                         if not shown_verifying_msg:
10592                                 shown_verifying_msg = True
10593                                 self._status_msg("Verifying ebuild manifests")
10594
10595                         root_config = x.root_config
10596                         portdb = root_config.trees["porttree"].dbapi
10597                         quiet_config = quiet_settings[root_config.root]
10598                         quiet_config["O"] = os.path.dirname(portdb.findname(x.cpv))
10599                         if not portage.digestcheck([], quiet_config, strict=True):
10600                                 return 1
10601
10602                 return os.EX_OK
10603
10604         def _add_prefetchers(self):
10605
10606                 if not self._parallel_fetch:
10607                         return
10608
10609                 if self._parallel_fetch:
10610                         self._status_msg("Starting parallel fetch")
10611
10612                         prefetchers = self._prefetchers
10613                         getbinpkg = "--getbinpkg" in self.myopts
10614
10615                         # In order to avoid "waiting for lock" messages
10616                         # at the beginning, which annoy users, never
10617                         # spawn a prefetcher for the first package.
10618                         for pkg in self._mergelist[1:]:
10619                                 prefetcher = self._create_prefetcher(pkg)
10620                                 if prefetcher is not None:
10621                                         self._task_queues.fetch.add(prefetcher)
10622                                         prefetchers[pkg] = prefetcher
10623
10624         def _create_prefetcher(self, pkg):
10625                 """
10626                 @return: a prefetcher, or None if not applicable
10627                 """
10628                 prefetcher = None
10629
10630                 if not isinstance(pkg, Package):
10631                         pass
10632
10633                 elif pkg.type_name == "ebuild":
10634
10635                         prefetcher = EbuildFetcher(background=True,
10636                                 config_pool=self._ConfigPool(pkg.root,
10637                                 self._allocate_config, self._deallocate_config),
10638                                 fetchonly=1, logfile=self._fetch_log,
10639                                 pkg=pkg, prefetch=True, scheduler=self._sched_iface)
10640
10641                 elif pkg.type_name == "binary" and \
10642                         "--getbinpkg" in self.myopts and \
10643                         pkg.root_config.trees["bintree"].isremote(pkg.cpv):
10644
10645                         prefetcher = BinpkgPrefetcher(background=True,
10646                                 pkg=pkg, scheduler=self._sched_iface)
10647
10648                 return prefetcher
10649
10650         def _is_restart_scheduled(self):
10651                 """
10652                 Check if the merge list contains a replacement
10653                 for the current running instance, that will result
10654                 in restart after merge.
10655                 @rtype: bool
10656                 @returns: True if a restart is scheduled, False otherwise.
10657                 """
10658                 if self._opts_no_restart.intersection(self.myopts):
10659                         return False
10660
10661                 mergelist = self._mergelist
10662
10663                 for i, pkg in enumerate(mergelist):
10664                         if self._is_restart_necessary(pkg) and \
10665                                 i != len(mergelist) - 1:
10666                                 return True
10667
10668                 return False
10669
10670         def _is_restart_necessary(self, pkg):
10671                 """
10672                 @return: True if merging the given package
10673                         requires restart, False otherwise.
10674                 """
10675
10676                 # Figure out if we need a restart.
10677                 if pkg.root == self._running_root.root and \
10678                         portage.match_from_list(
10679                         portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
10680                         if self._running_portage:
10681                                 return pkg.cpv != self._running_portage.cpv
10682                         return True
10683                 return False
10684
10685         def _restart_if_necessary(self, pkg):
10686                 """
10687                 Use execv() to restart emerge. This happens
10688                 if portage upgrades itself and there are
10689                 remaining packages in the list.
10690                 """
10691
10692                 if self._opts_no_restart.intersection(self.myopts):
10693                         return
10694
10695                 if not self._is_restart_necessary(pkg):
10696                         return
10697
10698                 if pkg == self._mergelist[-1]:
10699                         return
10700
10701                 self._main_loop_cleanup()
10702
10703                 logger = self._logger
10704                 pkg_count = self._pkg_count
10705                 mtimedb = self._mtimedb
10706                 bad_resume_opts = self._bad_resume_opts
10707
10708                 logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
10709                         (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
10710
10711                 logger.log(" *** RESTARTING " + \
10712                         "emerge via exec() after change of " + \
10713                         "portage version.")
10714
10715                 mtimedb["resume"]["mergelist"].remove(list(pkg))
10716                 mtimedb.commit()
10717                 portage.run_exitfuncs()
10718                 mynewargv = [sys.argv[0], "--resume"]
10719                 resume_opts = self.myopts.copy()
10720                 # For automatic resume, we need to prevent
10721                 # any of bad_resume_opts from leaking in
10722                 # via EMERGE_DEFAULT_OPTS.
10723                 resume_opts["--ignore-default-opts"] = True
10724                 for myopt, myarg in resume_opts.iteritems():
10725                         if myopt not in bad_resume_opts:
10726                                 if myarg is True:
10727                                         mynewargv.append(myopt)
10728                                 else:
10729                                         mynewargv.append(myopt +"="+ str(myarg))
10730                 # priority only needs to be adjusted on the first run
10731                 os.environ["PORTAGE_NICENESS"] = "0"
10732                 os.execv(mynewargv[0], mynewargv)
10733
10734         def merge(self):
10735
10736                 if "--resume" in self.myopts:
10737                         # We're resuming.
10738                         portage.writemsg_stdout(
10739                                 colorize("GOOD", "*** Resuming merge...\n"), noiselevel=-1)
10740                         self._logger.log(" *** Resuming merge...")
10741
10742                 self._save_resume_list()
10743
10744                 try:
10745                         self._background = self._background_mode()
10746                 except self._unknown_internal_error:
10747                         return 1
10748
10749                 for root in self.trees:
10750                         root_config = self.trees[root]["root_config"]
10751
10752                         # Even for --pretend --fetch mode, PORTAGE_TMPDIR is required
10753                         # since it might spawn pkg_nofetch which requires PORTAGE_BUILDDIR
10754                         # for ensuring sane $PWD (bug #239560) and storing elog messages.
10755                         tmpdir = root_config.settings.get("PORTAGE_TMPDIR", "")
10756                         if not tmpdir or not os.path.isdir(tmpdir):
10757                                 msg = "The directory specified in your " + \
10758                                         "PORTAGE_TMPDIR variable, '%s', " % tmpdir + \
10759                                 "does not exist. Please create this " + \
10760                                 "directory or correct your PORTAGE_TMPDIR setting."
10761                                 msg = textwrap.wrap(msg, 70)
10762                                 out = portage.output.EOutput()
10763                                 for l in msg:
10764                                         out.eerror(l)
10765                                 return 1
10766
10767                         if self._background:
10768                                 root_config.settings.unlock()
10769                                 root_config.settings["PORTAGE_BACKGROUND"] = "1"
10770                                 root_config.settings.backup_changes("PORTAGE_BACKGROUND")
10771                                 root_config.settings.lock()
10772
10773                         self.pkgsettings[root] = portage.config(
10774                                 clone=root_config.settings)
10775
10776                 rval = self._generate_digests()
10777                 if rval != os.EX_OK:
10778                         return rval
10779
10780                 rval = self._check_manifests()
10781                 if rval != os.EX_OK:
10782                         return rval
10783
10784                 keep_going = "--keep-going" in self.myopts
10785                 fetchonly = self._build_opts.fetchonly
10786                 mtimedb = self._mtimedb
10787                 failed_pkgs = self._failed_pkgs
10788
10789                 while True:
10790                         rval = self._merge()
10791                         if rval == os.EX_OK or fetchonly or not keep_going:
10792                                 break
10793                         if "resume" not in mtimedb:
10794                                 break
10795                         mergelist = self._mtimedb["resume"].get("mergelist")
10796                         if not mergelist:
10797                                 break
10798
10799                         if not failed_pkgs:
10800                                 break
10801
10802                         for failed_pkg in failed_pkgs:
10803                                 mergelist.remove(list(failed_pkg.pkg))
10804
10805                         self._failed_pkgs_all.extend(failed_pkgs)
10806                         del failed_pkgs[:]
10807
10808                         if not mergelist:
10809                                 break
10810
10811                         if not self._calc_resume_list():
10812                                 break
10813
10814                         clear_caches(self.trees)
10815                         if not self._mergelist:
10816                                 break
10817
10818                         self._save_resume_list()
10819                         self._pkg_count.curval = 0
10820                         self._pkg_count.maxval = len([x for x in self._mergelist \
10821                                 if isinstance(x, Package) and x.operation == "merge"])
10822                         self._status_display.maxval = self._pkg_count.maxval
10823
10824                 self._logger.log(" *** Finished. Cleaning up...")
10825
10826                 if failed_pkgs:
10827                         self._failed_pkgs_all.extend(failed_pkgs)
10828                         del failed_pkgs[:]
10829
10830                 background = self._background
10831                 failure_log_shown = False
10832                 if background and len(self._failed_pkgs_all) == 1:
10833                         # If only one package failed then just show it's
10834                         # whole log for easy viewing.
10835                         failed_pkg = self._failed_pkgs_all[-1]
10836                         build_dir = failed_pkg.build_dir
10837                         log_file = None
10838
10839                         log_paths = [failed_pkg.build_log]
10840
10841                         log_path = self._locate_failure_log(failed_pkg)
10842                         if log_path is not None:
10843                                 try:
10844                                         log_file = open(log_path)
10845                                 except IOError:
10846                                         pass
10847
10848                         if log_file is not None:
10849                                 try:
10850                                         for line in log_file:
10851                                                 writemsg_level(line, noiselevel=-1)
10852                                 finally:
10853                                         log_file.close()
10854                                 failure_log_shown = True
10855
10856                 # Dump mod_echo output now since it tends to flood the terminal.
10857                 # This allows us to avoid having more important output, generated
10858                 # later, from being swept away by the mod_echo output.
10859                 mod_echo_output =  _flush_elog_mod_echo()
10860
10861                 if background and not failure_log_shown and \
10862                         self._failed_pkgs_all and \
10863                         self._failed_pkgs_die_msgs and \
10864                         not mod_echo_output:
10865
10866                         printer = portage.output.EOutput()
10867                         for mysettings, key, logentries in self._failed_pkgs_die_msgs:
10868                                 root_msg = ""
10869                                 if mysettings["ROOT"] != "/":
10870                                         root_msg = " merged to %s" % mysettings["ROOT"]
10871                                 print
10872                                 printer.einfo("Error messages for package %s%s:" % \
10873                                         (colorize("INFORM", key), root_msg))
10874                                 print
10875                                 for phase in portage.const.EBUILD_PHASES:
10876                                         if phase not in logentries:
10877                                                 continue
10878                                         for msgtype, msgcontent in logentries[phase]:
10879                                                 if isinstance(msgcontent, basestring):
10880                                                         msgcontent = [msgcontent]
10881                                                 for line in msgcontent:
10882                                                         printer.eerror(line.strip("\n"))
10883
10884                 if self._post_mod_echo_msgs:
10885                         for msg in self._post_mod_echo_msgs:
10886                                 msg()
10887
10888                 if len(self._failed_pkgs_all) > 1 or \
10889                         (self._failed_pkgs_all and "--keep-going" in self.myopts):
10890                         if len(self._failed_pkgs_all) > 1:
10891                                 msg = "The following %d packages have " % \
10892                                         len(self._failed_pkgs_all) + \
10893                                         "failed to build or install:"
10894                         else:
10895                                 msg = "The following package has " + \
10896                                         "failed to build or install:"
10897                         prefix = bad(" * ")
10898                         writemsg(prefix + "\n", noiselevel=-1)
10899                         from textwrap import wrap
10900                         for line in wrap(msg, 72):
10901                                 writemsg("%s%s\n" % (prefix, line), noiselevel=-1)
10902                         writemsg(prefix + "\n", noiselevel=-1)
10903                         for failed_pkg in self._failed_pkgs_all:
10904                                 writemsg("%s\t%s\n" % (prefix,
10905                                         colorize("INFORM", str(failed_pkg.pkg))),
10906                                         noiselevel=-1)
10907                         writemsg(prefix + "\n", noiselevel=-1)
10908
10909                 return rval
10910
10911         def _elog_listener(self, mysettings, key, logentries, fulltext):
10912                 errors = portage.elog.filter_loglevels(logentries, ["ERROR"])
10913                 if errors:
10914                         self._failed_pkgs_die_msgs.append(
10915                                 (mysettings, key, errors))
10916
10917         def _locate_failure_log(self, failed_pkg):
10918
10919                 build_dir = failed_pkg.build_dir
10920                 log_file = None
10921
10922                 log_paths = [failed_pkg.build_log]
10923
10924                 for log_path in log_paths:
10925                         if not log_path:
10926                                 continue
10927
10928                         try:
10929                                 log_size = os.stat(log_path).st_size
10930                         except OSError:
10931                                 continue
10932
10933                         if log_size == 0:
10934                                 continue
10935
10936                         return log_path
10937
10938                 return None
10939
10940         def _add_packages(self):
10941                 pkg_queue = self._pkg_queue
10942                 for pkg in self._mergelist:
10943                         if isinstance(pkg, Package):
10944                                 pkg_queue.append(pkg)
10945                         elif isinstance(pkg, Blocker):
10946                                 pass
10947
10948         def _system_merge_started(self, merge):
10949                 """
10950                 Add any unsatisfied runtime deps to self._unsatisfied_system_deps.
10951                 """
10952                 graph = self._digraph
10953                 if graph is None:
10954                         return
10955                 pkg = merge.merge.pkg
10956
10957                 # Skip this if $ROOT != / since it shouldn't matter if there
10958                 # are unsatisfied system runtime deps in this case.
10959                 if pkg.root != '/':
10960                         return
10961
10962                 completed_tasks = self._completed_tasks
10963                 unsatisfied = self._unsatisfied_system_deps
10964
10965                 def ignore_non_runtime_or_satisfied(priority):
10966                         """
10967                         Ignore non-runtime and satisfied runtime priorities.
10968                         """
10969                         if isinstance(priority, DepPriority) and \
10970                                 not priority.satisfied and \
10971                                 (priority.runtime or priority.runtime_post):
10972                                 return False
10973                         return True
10974
10975                 # When checking for unsatisfied runtime deps, only check
10976                 # direct deps since indirect deps are checked when the
10977                 # corresponding parent is merged.
10978                 for child in graph.child_nodes(pkg,
10979                         ignore_priority=ignore_non_runtime_or_satisfied):
10980                         if not isinstance(child, Package) or \
10981                                 child.operation == 'uninstall':
10982                                 continue
10983                         if child is pkg:
10984                                 continue
10985                         if child.operation == 'merge' and \
10986                                 child not in completed_tasks:
10987                                 unsatisfied.add(child)
10988
10989         def _merge_wait_exit_handler(self, task):
10990                 self._merge_wait_scheduled.remove(task)
10991                 self._merge_exit(task)
10992
10993         def _merge_exit(self, merge):
10994                 self._do_merge_exit(merge)
10995                 self._deallocate_config(merge.merge.settings)
10996                 if merge.returncode == os.EX_OK and \
10997                         not merge.merge.pkg.installed:
10998                         self._status_display.curval += 1
10999                 self._status_display.merges = len(self._task_queues.merge)
11000                 self._schedule()
11001
11002         def _do_merge_exit(self, merge):
11003                 pkg = merge.merge.pkg
11004                 if merge.returncode != os.EX_OK:
11005                         settings = merge.merge.settings
11006                         build_dir = settings.get("PORTAGE_BUILDDIR")
11007                         build_log = settings.get("PORTAGE_LOG_FILE")
11008
11009                         self._failed_pkgs.append(self._failed_pkg(
11010                                 build_dir=build_dir, build_log=build_log,
11011                                 pkg=pkg,
11012                                 returncode=merge.returncode))
11013                         self._failed_pkg_msg(self._failed_pkgs[-1], "install", "to")
11014
11015                         self._status_display.failed = len(self._failed_pkgs)
11016                         return
11017
11018                 self._task_complete(pkg)
11019                 pkg_to_replace = merge.merge.pkg_to_replace
11020                 if pkg_to_replace is not None:
11021                         # When a package is replaced, mark it's uninstall
11022                         # task complete (if any).
11023                         uninst_hash_key = \
11024                                 ("installed", pkg.root, pkg_to_replace.cpv, "uninstall")
11025                         self._task_complete(uninst_hash_key)
11026
11027                 if pkg.installed:
11028                         return
11029
11030                 self._restart_if_necessary(pkg)
11031
11032                 # Call mtimedb.commit() after each merge so that
11033                 # --resume still works after being interrupted
11034                 # by reboot, sigkill or similar.
11035                 mtimedb = self._mtimedb
11036                 mtimedb["resume"]["mergelist"].remove(list(pkg))
11037                 if not mtimedb["resume"]["mergelist"]:
11038                         del mtimedb["resume"]
11039                 mtimedb.commit()
11040
11041         def _build_exit(self, build):
11042                 if build.returncode == os.EX_OK:
11043                         self.curval += 1
11044                         merge = PackageMerge(merge=build)
11045                         if not build.build_opts.buildpkgonly and \
11046                                 build.pkg in self._deep_system_deps:
11047                                 # Since dependencies on system packages are frequently
11048                                 # unspecified, merge them only when no builds are executing.
11049                                 self._merge_wait_queue.append(merge)
11050                                 merge.addStartListener(self._system_merge_started)
11051                         else:
11052                                 merge.addExitListener(self._merge_exit)
11053                                 self._task_queues.merge.add(merge)
11054                                 self._status_display.merges = len(self._task_queues.merge)
11055                 else:
11056                         settings = build.settings
11057                         build_dir = settings.get("PORTAGE_BUILDDIR")
11058                         build_log = settings.get("PORTAGE_LOG_FILE")
11059
11060                         self._failed_pkgs.append(self._failed_pkg(
11061                                 build_dir=build_dir, build_log=build_log,
11062                                 pkg=build.pkg,
11063                                 returncode=build.returncode))
11064                         self._failed_pkg_msg(self._failed_pkgs[-1], "emerge", "for")
11065
11066                         self._status_display.failed = len(self._failed_pkgs)
11067                         self._deallocate_config(build.settings)
11068                 self._jobs -= 1
11069                 self._status_display.running = self._jobs
11070                 self._schedule()
11071
11072         def _extract_exit(self, build):
11073                 self._build_exit(build)
11074
11075         def _task_complete(self, pkg):
11076                 self._completed_tasks.add(pkg)
11077                 self._unsatisfied_system_deps.discard(pkg)
11078                 self._choose_pkg_return_early = False
11079
11080         def _merge(self):
11081
11082                 self._add_prefetchers()
11083                 self._add_packages()
11084                 pkg_queue = self._pkg_queue
11085                 failed_pkgs = self._failed_pkgs
11086                 portage.locks._quiet = self._background
11087                 portage.elog._emerge_elog_listener = self._elog_listener
11088                 rval = os.EX_OK
11089
11090                 try:
11091                         self._main_loop()
11092                 finally:
11093                         self._main_loop_cleanup()
11094                         portage.locks._quiet = False
11095                         portage.elog._emerge_elog_listener = None
11096                         if failed_pkgs:
11097                                 rval = failed_pkgs[-1].returncode
11098
11099                 return rval
11100
11101         def _main_loop_cleanup(self):
11102                 del self._pkg_queue[:]
11103                 self._completed_tasks.clear()
11104                 self._deep_system_deps.clear()
11105                 self._unsatisfied_system_deps.clear()
11106                 self._choose_pkg_return_early = False
11107                 self._status_display.reset()
11108                 self._digraph = None
11109                 self._task_queues.fetch.clear()
11110
11111         def _choose_pkg(self):
11112                 """
11113                 Choose a task that has all it's dependencies satisfied.
11114                 """
11115
11116                 if self._choose_pkg_return_early:
11117                         return None
11118
11119                 if self._digraph is None:
11120                         if (self._jobs or self._task_queues.merge) and \
11121                                 not ("--nodeps" in self.myopts and \
11122                                 (self._max_jobs is True or self._max_jobs > 1)):
11123                                 self._choose_pkg_return_early = True
11124                                 return None
11125                         return self._pkg_queue.pop(0)
11126
11127                 if not (self._jobs or self._task_queues.merge):
11128                         return self._pkg_queue.pop(0)
11129
11130                 self._prune_digraph()
11131
11132                 chosen_pkg = None
11133                 later = set(self._pkg_queue)
11134                 for pkg in self._pkg_queue:
11135                         later.remove(pkg)
11136                         if not self._dependent_on_scheduled_merges(pkg, later):
11137                                 chosen_pkg = pkg
11138                                 break
11139
11140                 if chosen_pkg is not None:
11141                         self._pkg_queue.remove(chosen_pkg)
11142
11143                 if chosen_pkg is None:
11144                         # There's no point in searching for a package to
11145                         # choose until at least one of the existing jobs
11146                         # completes.
11147                         self._choose_pkg_return_early = True
11148
11149                 return chosen_pkg
11150
11151         def _dependent_on_scheduled_merges(self, pkg, later):
11152                 """
11153                 Traverse the subgraph of the given packages deep dependencies
11154                 to see if it contains any scheduled merges.
11155                 @param pkg: a package to check dependencies for
11156                 @type pkg: Package
11157                 @param later: packages for which dependence should be ignored
11158                         since they will be merged later than pkg anyway and therefore
11159                         delaying the merge of pkg will not result in a more optimal
11160                         merge order
11161                 @type later: set
11162                 @rtype: bool
11163                 @returns: True if the package is dependent, False otherwise.
11164                 """
11165
11166                 graph = self._digraph
11167                 completed_tasks = self._completed_tasks
11168
11169                 dependent = False
11170                 traversed_nodes = set([pkg])
11171                 direct_deps = graph.child_nodes(pkg)
11172                 node_stack = direct_deps
11173                 direct_deps = frozenset(direct_deps)
11174                 while node_stack:
11175                         node = node_stack.pop()
11176                         if node in traversed_nodes:
11177                                 continue
11178                         traversed_nodes.add(node)
11179                         if not ((node.installed and node.operation == "nomerge") or \
11180                                 (node.operation == "uninstall" and \
11181                                 node not in direct_deps) or \
11182                                 node in completed_tasks or \
11183                                 node in later):
11184                                 dependent = True
11185                                 break
11186                         node_stack.extend(graph.child_nodes(node))
11187
11188                 return dependent
11189
11190         def _allocate_config(self, root):
11191                 """
11192                 Allocate a unique config instance for a task in order
11193                 to prevent interference between parallel tasks.
11194                 """
11195                 if self._config_pool[root]:
11196                         temp_settings = self._config_pool[root].pop()
11197                 else:
11198                         temp_settings = portage.config(clone=self.pkgsettings[root])
11199                 # Since config.setcpv() isn't guaranteed to call config.reset() due to
11200                 # performance reasons, call it here to make sure all settings from the
11201                 # previous package get flushed out (such as PORTAGE_LOG_FILE).
11202                 temp_settings.reload()
11203                 temp_settings.reset()
11204                 return temp_settings
11205
11206         def _deallocate_config(self, settings):
11207                 self._config_pool[settings["ROOT"]].append(settings)
11208
11209         def _main_loop(self):
11210
11211                 # Only allow 1 job max if a restart is scheduled
11212                 # due to portage update.
11213                 if self._is_restart_scheduled() or \
11214                         self._opts_no_background.intersection(self.myopts):
11215                         self._set_max_jobs(1)
11216
11217                 merge_queue = self._task_queues.merge
11218
11219                 while self._schedule():
11220                         if self._poll_event_handlers:
11221                                 self._poll_loop()
11222
11223                 while True:
11224                         self._schedule()
11225                         if not (self._jobs or merge_queue):
11226                                 break
11227                         if self._poll_event_handlers:
11228                                 self._poll_loop()
11229
11230         def _keep_scheduling(self):
11231                 return bool(self._pkg_queue and \
11232                         not (self._failed_pkgs and not self._build_opts.fetchonly))
11233
11234         def _schedule_tasks(self):
11235
11236                 # When the number of jobs drops to zero, process all waiting merges.
11237                 if not self._jobs and self._merge_wait_queue:
11238                         for task in self._merge_wait_queue:
11239                                 task.addExitListener(self._merge_wait_exit_handler)
11240                                 self._task_queues.merge.add(task)
11241                         self._status_display.merges = len(self._task_queues.merge)
11242                         self._merge_wait_scheduled.extend(self._merge_wait_queue)
11243                         del self._merge_wait_queue[:]
11244
11245                 self._schedule_tasks_imp()
11246                 self._status_display.display()
11247
11248                 state_change = 0
11249                 for q in self._task_queues.values():
11250                         if q.schedule():
11251                                 state_change += 1
11252
11253                 # Cancel prefetchers if they're the only reason
11254                 # the main poll loop is still running.
11255                 if self._failed_pkgs and not self._build_opts.fetchonly and \
11256                         not (self._jobs or self._task_queues.merge) and \
11257                         self._task_queues.fetch:
11258                         self._task_queues.fetch.clear()
11259                         state_change += 1
11260
11261                 if state_change:
11262                         self._schedule_tasks_imp()
11263                         self._status_display.display()
11264
11265                 return self._keep_scheduling()
11266
11267         def _job_delay(self):
11268                 """
11269                 @rtype: bool
11270                 @returns: True if job scheduling should be delayed, False otherwise.
11271                 """
11272
11273                 if self._jobs and self._max_load is not None:
11274
11275                         current_time = time.time()
11276
11277                         delay = self._job_delay_factor * self._jobs ** self._job_delay_exp
11278                         if delay > self._job_delay_max:
11279                                 delay = self._job_delay_max
11280                         if (current_time - self._previous_job_start_time) < delay:
11281                                 return True
11282
11283                 return False
11284
11285         def _schedule_tasks_imp(self):
11286                 """
11287                 @rtype: bool
11288                 @returns: True if state changed, False otherwise.
11289                 """
11290
11291                 state_change = 0
11292
11293                 while True:
11294
11295                         if not self._keep_scheduling():
11296                                 return bool(state_change)
11297
11298                         if self._choose_pkg_return_early or \
11299                                 self._merge_wait_scheduled or \
11300                                 (self._jobs and self._unsatisfied_system_deps) or \
11301                                 not self._can_add_job() or \
11302                                 self._job_delay():
11303                                 return bool(state_change)
11304
11305                         pkg = self._choose_pkg()
11306                         if pkg is None:
11307                                 return bool(state_change)
11308
11309                         state_change += 1
11310
11311                         if not pkg.installed:
11312                                 self._pkg_count.curval += 1
11313
11314                         task = self._task(pkg)
11315
11316                         if pkg.installed:
11317                                 merge = PackageMerge(merge=task)
11318                                 merge.addExitListener(self._merge_exit)
11319                                 self._task_queues.merge.add(merge)
11320
11321                         elif pkg.built:
11322                                 self._jobs += 1
11323                                 self._previous_job_start_time = time.time()
11324                                 self._status_display.running = self._jobs
11325                                 task.addExitListener(self._extract_exit)
11326                                 self._task_queues.jobs.add(task)
11327
11328                         else:
11329                                 self._jobs += 1
11330                                 self._previous_job_start_time = time.time()
11331                                 self._status_display.running = self._jobs
11332                                 task.addExitListener(self._build_exit)
11333                                 self._task_queues.jobs.add(task)
11334
11335                 return bool(state_change)
11336
11337         def _task(self, pkg):
11338
11339                 pkg_to_replace = None
11340                 if pkg.operation != "uninstall":
11341                         vardb = pkg.root_config.trees["vartree"].dbapi
11342                         previous_cpv = vardb.match(pkg.slot_atom)
11343                         if previous_cpv:
11344                                 previous_cpv = previous_cpv.pop()
11345                                 pkg_to_replace = self._pkg(previous_cpv,
11346                                         "installed", pkg.root_config, installed=True)
11347
11348                 task = MergeListItem(args_set=self._args_set,
11349                         background=self._background, binpkg_opts=self._binpkg_opts,
11350                         build_opts=self._build_opts,
11351                         config_pool=self._ConfigPool(pkg.root,
11352                         self._allocate_config, self._deallocate_config),
11353                         emerge_opts=self.myopts,
11354                         find_blockers=self._find_blockers(pkg), logger=self._logger,
11355                         mtimedb=self._mtimedb, pkg=pkg, pkg_count=self._pkg_count.copy(),
11356                         pkg_to_replace=pkg_to_replace,
11357                         prefetcher=self._prefetchers.get(pkg),
11358                         scheduler=self._sched_iface,
11359                         settings=self._allocate_config(pkg.root),
11360                         statusMessage=self._status_msg,
11361                         world_atom=self._world_atom)
11362
11363                 return task
11364
11365         def _failed_pkg_msg(self, failed_pkg, action, preposition):
11366                 pkg = failed_pkg.pkg
11367                 msg = "%s to %s %s" % \
11368                         (bad("Failed"), action, colorize("INFORM", pkg.cpv))
11369                 if pkg.root != "/":
11370                         msg += " %s %s" % (preposition, pkg.root)
11371
11372                 log_path = self._locate_failure_log(failed_pkg)
11373                 if log_path is not None:
11374                         msg += ", Log file:"
11375                 self._status_msg(msg)
11376
11377                 if log_path is not None:
11378                         self._status_msg(" '%s'" % (colorize("INFORM", log_path),))
11379
11380         def _status_msg(self, msg):
11381                 """
11382                 Display a brief status message (no newlines) in the status display.
11383                 This is called by tasks to provide feedback to the user. This
11384                 delegates the resposibility of generating \r and \n control characters,
11385                 to guarantee that lines are created or erased when necessary and
11386                 appropriate.
11387
11388                 @type msg: str
11389                 @param msg: a brief status message (no newlines allowed)
11390                 """
11391                 if not self._background:
11392                         writemsg_level("\n")
11393                 self._status_display.displayMessage(msg)
11394
11395         def _save_resume_list(self):
11396                 """
11397                 Do this before verifying the ebuild Manifests since it might
11398                 be possible for the user to use --resume --skipfirst get past
11399                 a non-essential package with a broken digest.
11400                 """
11401                 mtimedb = self._mtimedb
11402                 mtimedb["resume"]["mergelist"] = [list(x) \
11403                         for x in self._mergelist \
11404                         if isinstance(x, Package) and x.operation == "merge"]
11405
11406                 mtimedb.commit()
11407
11408         def _calc_resume_list(self):
11409                 """
11410                 Use the current resume list to calculate a new one,
11411                 dropping any packages with unsatisfied deps.
11412                 @rtype: bool
11413                 @returns: True if successful, False otherwise.
11414                 """
11415                 print colorize("GOOD", "*** Resuming merge...")
11416
11417                 if self._show_list():
11418                         if "--tree" in self.myopts:
11419                                 portage.writemsg_stdout("\n" + \
11420                                         darkgreen("These are the packages that " + \
11421                                         "would be merged, in reverse order:\n\n"))
11422
11423                         else:
11424                                 portage.writemsg_stdout("\n" + \
11425                                         darkgreen("These are the packages that " + \
11426                                         "would be merged, in order:\n\n"))
11427
11428                 show_spinner = "--quiet" not in self.myopts and \
11429                         "--nodeps" not in self.myopts
11430
11431                 if show_spinner:
11432                         print "Calculating dependencies  ",
11433
11434                 myparams = create_depgraph_params(self.myopts, None)
11435                 success = False
11436                 e = None
11437                 try:
11438                         success, mydepgraph, dropped_tasks = resume_depgraph(
11439                                 self.settings, self.trees, self._mtimedb, self.myopts,
11440                                 myparams, self._spinner)
11441                 except depgraph.UnsatisfiedResumeDep, exc:
11442                         # rename variable to avoid python-3.0 error:
11443                         # SyntaxError: can not delete variable 'e' referenced in nested
11444                         #              scope
11445                         e = exc
11446                         mydepgraph = e.depgraph
11447                         dropped_tasks = set()
11448
11449                 if show_spinner:
11450                         print "\b\b... done!"
11451
11452                 if e is not None:
11453                         def unsatisfied_resume_dep_msg():
11454                                 mydepgraph.display_problems()
11455                                 out = portage.output.EOutput()
11456                                 out.eerror("One or more packages are either masked or " + \
11457                                         "have missing dependencies:")
11458                                 out.eerror("")
11459                                 indent = "  "
11460                                 show_parents = set()
11461                                 for dep in e.value:
11462                                         if dep.parent in show_parents:
11463                                                 continue
11464                                         show_parents.add(dep.parent)
11465                                         if dep.atom is None:
11466                                                 out.eerror(indent + "Masked package:")
11467                                                 out.eerror(2 * indent + str(dep.parent))
11468                                                 out.eerror("")
11469                                         else:
11470                                                 out.eerror(indent + str(dep.atom) + " pulled in by:")
11471                                                 out.eerror(2 * indent + str(dep.parent))
11472                                                 out.eerror("")
11473                                 msg = "The resume list contains packages " + \
11474                                         "that are either masked or have " + \
11475                                         "unsatisfied dependencies. " + \
11476                                         "Please restart/continue " + \
11477                                         "the operation manually, or use --skipfirst " + \
11478                                         "to skip the first package in the list and " + \
11479                                         "any other packages that may be " + \
11480                                         "masked or have missing dependencies."
11481                                 for line in textwrap.wrap(msg, 72):
11482                                         out.eerror(line)
11483                         self._post_mod_echo_msgs.append(unsatisfied_resume_dep_msg)
11484                         return False
11485
11486                 if success and self._show_list():
11487                         mylist = mydepgraph.altlist()
11488                         if mylist:
11489                                 if "--tree" in self.myopts:
11490                                         mylist.reverse()
11491                                 mydepgraph.display(mylist, favorites=self._favorites)
11492
11493                 if not success:
11494                         self._post_mod_echo_msgs.append(mydepgraph.display_problems)
11495                         return False
11496                 mydepgraph.display_problems()
11497
11498                 mylist = mydepgraph.altlist()
11499                 mydepgraph.break_refs(mylist)
11500                 mydepgraph.break_refs(dropped_tasks)
11501                 self._mergelist = mylist
11502                 self._set_digraph(mydepgraph.schedulerGraph())
11503
11504                 msg_width = 75
11505                 for task in dropped_tasks:
11506                         if not (isinstance(task, Package) and task.operation == "merge"):
11507                                 continue
11508                         pkg = task
11509                         msg = "emerge --keep-going:" + \
11510                                 " %s" % (pkg.cpv,)
11511                         if pkg.root != "/":
11512                                 msg += " for %s" % (pkg.root,)
11513                         msg += " dropped due to unsatisfied dependency."
11514                         for line in textwrap.wrap(msg, msg_width):
11515                                 eerror(line, phase="other", key=pkg.cpv)
11516                         settings = self.pkgsettings[pkg.root]
11517                         # Ensure that log collection from $T is disabled inside
11518                         # elog_process(), since any logs that might exist are
11519                         # not valid here.
11520                         settings.pop("T", None)
11521                         portage.elog.elog_process(pkg.cpv, settings)
11522                         self._failed_pkgs_all.append(self._failed_pkg(pkg=pkg))
11523
11524                 return True
11525
11526         def _show_list(self):
11527                 myopts = self.myopts
11528                 if "--quiet" not in myopts and \
11529                         ("--ask" in myopts or "--tree" in myopts or \
11530                         "--verbose" in myopts):
11531                         return True
11532                 return False
11533
11534         def _world_atom(self, pkg):
11535                 """
11536                 Add the package to the world file, but only if
11537                 it's supposed to be added. Otherwise, do nothing.
11538                 """
11539
11540                 if set(("--buildpkgonly", "--fetchonly",
11541                         "--fetch-all-uri",
11542                         "--oneshot", "--onlydeps",
11543                         "--pretend")).intersection(self.myopts):
11544                         return
11545
11546                 if pkg.root != self.target_root:
11547                         return
11548
11549                 args_set = self._args_set
11550                 if not args_set.findAtomForPackage(pkg):
11551                         return
11552
11553                 logger = self._logger
11554                 pkg_count = self._pkg_count
11555                 root_config = pkg.root_config
11556                 world_set = root_config.sets["world"]
11557                 world_locked = False
11558                 if hasattr(world_set, "lock"):
11559                         world_set.lock()
11560                         world_locked = True
11561
11562                 try:
11563                         if hasattr(world_set, "load"):
11564                                 world_set.load() # maybe it's changed on disk
11565
11566                         atom = create_world_atom(pkg, args_set, root_config)
11567                         if atom:
11568                                 if hasattr(world_set, "add"):
11569                                         self._status_msg(('Recording %s in "world" ' + \
11570                                                 'favorites file...') % atom)
11571                                         logger.log(" === (%s of %s) Updating world file (%s)" % \
11572                                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv))
11573                                         world_set.add(atom)
11574                                 else:
11575                                         writemsg_level('\n!!! Unable to record %s in "world"\n' % \
11576                                                 (atom,), level=logging.WARN, noiselevel=-1)
11577                 finally:
11578                         if world_locked:
11579                                 world_set.unlock()
11580
11581         def _pkg(self, cpv, type_name, root_config, installed=False):
11582                 """
11583                 Get a package instance from the cache, or create a new
11584                 one if necessary. Raises KeyError from aux_get if it
11585                 failures for some reason (package does not exist or is
11586                 corrupt).
11587                 """
11588                 operation = "merge"
11589                 if installed:
11590                         operation = "nomerge"
11591
11592                 if self._digraph is not None:
11593                         # Reuse existing instance when available.
11594                         pkg = self._digraph.get(
11595                                 (type_name, root_config.root, cpv, operation))
11596                         if pkg is not None:
11597                                 return pkg
11598
11599                 tree_type = depgraph.pkg_tree_map[type_name]
11600                 db = root_config.trees[tree_type].dbapi
11601                 db_keys = list(self.trees[root_config.root][
11602                         tree_type].dbapi._aux_cache_keys)
11603                 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
11604                 pkg = Package(cpv=cpv, metadata=metadata,
11605                         root_config=root_config, installed=installed)
11606                 if type_name == "ebuild":
11607                         settings = self.pkgsettings[root_config.root]
11608                         settings.setcpv(pkg)
11609                         pkg.metadata["USE"] = settings["PORTAGE_USE"]
11610                         pkg.metadata['CHOST'] = settings.get('CHOST', '')
11611
11612                 return pkg
11613
11614 class MetadataRegen(PollScheduler):
11615
11616         def __init__(self, portdb, cp_iter=None, consumer=None,
11617                 max_jobs=None, max_load=None):
11618                 PollScheduler.__init__(self)
11619                 self._portdb = portdb
11620                 self._global_cleanse = False
11621                 if cp_iter is None:
11622                         cp_iter = self._iter_every_cp()
11623                         # We can globally cleanse stale cache only if we
11624                         # iterate over every single cp.
11625                         self._global_cleanse = True
11626                 self._cp_iter = cp_iter
11627                 self._consumer = consumer
11628
11629                 if max_jobs is None:
11630                         max_jobs = 1
11631
11632                 self._max_jobs = max_jobs
11633                 self._max_load = max_load
11634                 self._sched_iface = self._sched_iface_class(
11635                         register=self._register,
11636                         schedule=self._schedule_wait,
11637                         unregister=self._unregister)
11638
11639                 self._valid_pkgs = set()
11640                 self._cp_set = set()
11641                 self._process_iter = self._iter_metadata_processes()
11642                 self.returncode = os.EX_OK
11643                 self._error_count = 0
11644
11645         def _iter_every_cp(self):
11646                 every_cp = self._portdb.cp_all()
11647                 every_cp.sort(reverse=True)
11648                 try:
11649                         while True:
11650                                 yield every_cp.pop()
11651                 except IndexError:
11652                         pass
11653
11654         def _iter_metadata_processes(self):
11655                 portdb = self._portdb
11656                 valid_pkgs = self._valid_pkgs
11657                 cp_set = self._cp_set
11658                 consumer = self._consumer
11659
11660                 for cp in self._cp_iter:
11661                         cp_set.add(cp)
11662                         portage.writemsg_stdout("Processing %s\n" % cp)
11663                         cpv_list = portdb.cp_list(cp)
11664                         for cpv in cpv_list:
11665                                 valid_pkgs.add(cpv)
11666                                 ebuild_path, repo_path = portdb.findname2(cpv)
11667                                 metadata, st, emtime = portdb._pull_valid_cache(
11668                                         cpv, ebuild_path, repo_path)
11669                                 if metadata is not None:
11670                                         if consumer is not None:
11671                                                 consumer(cpv, ebuild_path,
11672                                                         repo_path, metadata)
11673                                         continue
11674
11675                                 yield EbuildMetadataPhase(cpv=cpv, ebuild_path=ebuild_path,
11676                                         ebuild_mtime=emtime,
11677                                         metadata_callback=portdb._metadata_callback,
11678                                         portdb=portdb, repo_path=repo_path,
11679                                         settings=portdb.doebuild_settings)
11680
11681         def run(self):
11682
11683                 portdb = self._portdb
11684                 from portage.cache.cache_errors import CacheError
11685                 dead_nodes = {}
11686
11687                 while self._schedule():
11688                         self._poll_loop()
11689
11690                 while self._jobs:
11691                         self._poll_loop()
11692
11693                 if self._global_cleanse:
11694                         for mytree in portdb.porttrees:
11695                                 try:
11696                                         dead_nodes[mytree] = set(portdb.auxdb[mytree].iterkeys())
11697                                 except CacheError, e:
11698                                         portage.writemsg("Error listing cache entries for " + \
11699                                                 "'%s': %s, continuing...\n" % (mytree, e),
11700                                                 noiselevel=-1)
11701                                         del e
11702                                         dead_nodes = None
11703                                         break
11704                 else:
11705                         cp_set = self._cp_set
11706                         cpv_getkey = portage.cpv_getkey
11707                         for mytree in portdb.porttrees:
11708                                 try:
11709                                         dead_nodes[mytree] = set(cpv for cpv in \
11710                                                 portdb.auxdb[mytree].iterkeys() \
11711                                                 if cpv_getkey(cpv) in cp_set)
11712                                 except CacheError, e:
11713                                         portage.writemsg("Error listing cache entries for " + \
11714                                                 "'%s': %s, continuing...\n" % (mytree, e),
11715                                                 noiselevel=-1)
11716                                         del e
11717                                         dead_nodes = None
11718                                         break
11719
11720                 if dead_nodes:
11721                         for y in self._valid_pkgs:
11722                                 for mytree in portdb.porttrees:
11723                                         if portdb.findname2(y, mytree=mytree)[0]:
11724                                                 dead_nodes[mytree].discard(y)
11725
11726                         for mytree, nodes in dead_nodes.iteritems():
11727                                 auxdb = portdb.auxdb[mytree]
11728                                 for y in nodes:
11729                                         try:
11730                                                 del auxdb[y]
11731                                         except (KeyError, CacheError):
11732                                                 pass
11733
11734         def _schedule_tasks(self):
11735                 """
11736                 @rtype: bool
11737                 @returns: True if there may be remaining tasks to schedule,
11738                         False otherwise.
11739                 """
11740                 while self._can_add_job():
11741                         try:
11742                                 metadata_process = self._process_iter.next()
11743                         except StopIteration:
11744                                 return False
11745
11746                         self._jobs += 1
11747                         metadata_process.scheduler = self._sched_iface
11748                         metadata_process.addExitListener(self._metadata_exit)
11749                         metadata_process.start()
11750                 return True
11751
11752         def _metadata_exit(self, metadata_process):
11753                 self._jobs -= 1
11754                 if metadata_process.returncode != os.EX_OK:
11755                         self.returncode = 1
11756                         self._error_count += 1
11757                         self._valid_pkgs.discard(metadata_process.cpv)
11758                         portage.writemsg("Error processing %s, continuing...\n" % \
11759                                 (metadata_process.cpv,), noiselevel=-1)
11760
11761                 if self._consumer is not None:
11762                         # On failure, still notify the consumer (in this case the metadata
11763                         # argument is None).
11764                         self._consumer(metadata_process.cpv,
11765                                 metadata_process.ebuild_path,
11766                                 metadata_process.repo_path,
11767                                 metadata_process.metadata)
11768
11769                 self._schedule()
11770
11771 class UninstallFailure(portage.exception.PortageException):
11772         """
11773         An instance of this class is raised by unmerge() when
11774         an uninstallation fails.
11775         """
11776         status = 1
11777         def __init__(self, *pargs):
11778                 portage.exception.PortageException.__init__(self, pargs)
11779                 if pargs:
11780                         self.status = pargs[0]
11781
11782 def unmerge(root_config, myopts, unmerge_action,
11783         unmerge_files, ldpath_mtimes, autoclean=0,
11784         clean_world=1, clean_delay=1, ordered=0, raise_on_error=0,
11785         scheduler=None, writemsg_level=portage.util.writemsg_level):
11786
11787         quiet = "--quiet" in myopts
11788         settings = root_config.settings
11789         sets = root_config.sets
11790         vartree = root_config.trees["vartree"]
11791         candidate_catpkgs=[]
11792         global_unmerge=0
11793         xterm_titles = "notitles" not in settings.features
11794         out = portage.output.EOutput()
11795         pkg_cache = {}
11796         db_keys = list(vartree.dbapi._aux_cache_keys)
11797
11798         def _pkg(cpv):
11799                 pkg = pkg_cache.get(cpv)
11800                 if pkg is None:
11801                         pkg = Package(cpv=cpv, installed=True,
11802                                 metadata=izip(db_keys, vartree.dbapi.aux_get(cpv, db_keys)),
11803                                 root_config=root_config,
11804                                 type_name="installed")
11805                         pkg_cache[cpv] = pkg
11806                 return pkg
11807
11808         vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
11809         try:
11810                 # At least the parent needs to exist for the lock file.
11811                 portage.util.ensure_dirs(vdb_path)
11812         except portage.exception.PortageException:
11813                 pass
11814         vdb_lock = None
11815         try:
11816                 if os.access(vdb_path, os.W_OK):
11817                         vdb_lock = portage.locks.lockdir(vdb_path)
11818                 realsyslist = sets["system"].getAtoms()
11819                 syslist = []
11820                 for x in realsyslist:
11821                         mycp = portage.dep_getkey(x)
11822                         if mycp in settings.getvirtuals():
11823                                 providers = []
11824                                 for provider in settings.getvirtuals()[mycp]:
11825                                         if vartree.dbapi.match(provider):
11826                                                 providers.append(provider)
11827                                 if len(providers) == 1:
11828                                         syslist.extend(providers)
11829                         else:
11830                                 syslist.append(mycp)
11831         
11832                 mysettings = portage.config(clone=settings)
11833         
11834                 if not unmerge_files:
11835                         if unmerge_action == "unmerge":
11836                                 print
11837                                 print bold("emerge unmerge") + " can only be used with specific package names"
11838                                 print
11839                                 return 0
11840                         else:
11841                                 global_unmerge = 1
11842         
11843                 localtree = vartree
11844                 # process all arguments and add all
11845                 # valid db entries to candidate_catpkgs
11846                 if global_unmerge:
11847                         if not unmerge_files:
11848                                 candidate_catpkgs.extend(vartree.dbapi.cp_all())
11849                 else:
11850                         #we've got command-line arguments
11851                         if not unmerge_files:
11852                                 print "\nNo packages to unmerge have been provided.\n"
11853                                 return 0
11854                         for x in unmerge_files:
11855                                 arg_parts = x.split('/')
11856                                 if x[0] not in [".","/"] and \
11857                                         arg_parts[-1][-7:] != ".ebuild":
11858                                         #possible cat/pkg or dep; treat as such
11859                                         candidate_catpkgs.append(x)
11860                                 elif unmerge_action in ["prune","clean"]:
11861                                         print "\n!!! Prune and clean do not accept individual" + \
11862                                                 " ebuilds as arguments;\n    skipping.\n"
11863                                         continue
11864                                 else:
11865                                         # it appears that the user is specifying an installed
11866                                         # ebuild and we're in "unmerge" mode, so it's ok.
11867                                         if not os.path.exists(x):
11868                                                 print "\n!!! The path '"+x+"' doesn't exist.\n"
11869                                                 return 0
11870         
11871                                         absx   = os.path.abspath(x)
11872                                         sp_absx = absx.split("/")
11873                                         if sp_absx[-1][-7:] == ".ebuild":
11874                                                 del sp_absx[-1]
11875                                                 absx = "/".join(sp_absx)
11876         
11877                                         sp_absx_len = len(sp_absx)
11878         
11879                                         vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
11880                                         vdb_len  = len(vdb_path)
11881         
11882                                         sp_vdb     = vdb_path.split("/")
11883                                         sp_vdb_len = len(sp_vdb)
11884         
11885                                         if not os.path.exists(absx+"/CONTENTS"):
11886                                                 print "!!! Not a valid db dir: "+str(absx)
11887                                                 return 0
11888         
11889                                         if sp_absx_len <= sp_vdb_len:
11890                                                 # The Path is shorter... so it can't be inside the vdb.
11891                                                 print sp_absx
11892                                                 print absx
11893                                                 print "\n!!!",x,"cannot be inside "+ \
11894                                                         vdb_path+"; aborting.\n"
11895                                                 return 0
11896         
11897                                         for idx in range(0,sp_vdb_len):
11898                                                 if idx >= sp_absx_len or sp_vdb[idx] != sp_absx[idx]:
11899                                                         print sp_absx
11900                                                         print absx
11901                                                         print "\n!!!", x, "is not inside "+\
11902                                                                 vdb_path+"; aborting.\n"
11903                                                         return 0
11904         
11905                                         print "="+"/".join(sp_absx[sp_vdb_len:])
11906                                         candidate_catpkgs.append(
11907                                                 "="+"/".join(sp_absx[sp_vdb_len:]))
11908         
11909                 newline=""
11910                 if (not "--quiet" in myopts):
11911                         newline="\n"
11912                 if settings["ROOT"] != "/":
11913                         writemsg_level(darkgreen(newline+ \
11914                                 ">>> Using system located in ROOT tree %s\n" % \
11915                                 settings["ROOT"]))
11916
11917                 if (("--pretend" in myopts) or ("--ask" in myopts)) and \
11918                         not ("--quiet" in myopts):
11919                         writemsg_level(darkgreen(newline+\
11920                                 ">>> These are the packages that would be unmerged:\n"))
11921
11922                 # Preservation of order is required for --depclean and --prune so
11923                 # that dependencies are respected. Use all_selected to eliminate
11924                 # duplicate packages since the same package may be selected by
11925                 # multiple atoms.
11926                 pkgmap = []
11927                 all_selected = set()
11928                 for x in candidate_catpkgs:
11929                         # cycle through all our candidate deps and determine
11930                         # what will and will not get unmerged
11931                         try:
11932                                 mymatch = vartree.dbapi.match(x)
11933                         except portage.exception.AmbiguousPackageName, errpkgs:
11934                                 print "\n\n!!! The short ebuild name \"" + \
11935                                         x + "\" is ambiguous.  Please specify"
11936                                 print "!!! one of the following fully-qualified " + \
11937                                         "ebuild names instead:\n"
11938                                 for i in errpkgs[0]:
11939                                         print "    " + green(i)
11940                                 print
11941                                 sys.exit(1)
11942         
11943                         if not mymatch and x[0] not in "<>=~":
11944                                 mymatch = localtree.dep_match(x)
11945                         if not mymatch:
11946                                 portage.writemsg("\n--- Couldn't find '%s' to %s.\n" % \
11947                                         (x, unmerge_action), noiselevel=-1)
11948                                 continue
11949
11950                         pkgmap.append(
11951                                 {"protected": set(), "selected": set(), "omitted": set()})
11952                         mykey = len(pkgmap) - 1
11953                         if unmerge_action=="unmerge":
11954                                         for y in mymatch:
11955                                                 if y not in all_selected:
11956                                                         pkgmap[mykey]["selected"].add(y)
11957                                                         all_selected.add(y)
11958                         elif unmerge_action == "prune":
11959                                 if len(mymatch) == 1:
11960                                         continue
11961                                 best_version = mymatch[0]
11962                                 best_slot = vartree.getslot(best_version)
11963                                 best_counter = vartree.dbapi.cpv_counter(best_version)
11964                                 for mypkg in mymatch[1:]:
11965                                         myslot = vartree.getslot(mypkg)
11966                                         mycounter = vartree.dbapi.cpv_counter(mypkg)
11967                                         if (myslot == best_slot and mycounter > best_counter) or \
11968                                                 mypkg == portage.best([mypkg, best_version]):
11969                                                 if myslot == best_slot:
11970                                                         if mycounter < best_counter:
11971                                                                 # On slot collision, keep the one with the
11972                                                                 # highest counter since it is the most
11973                                                                 # recently installed.
11974                                                                 continue
11975                                                 best_version = mypkg
11976                                                 best_slot = myslot
11977                                                 best_counter = mycounter
11978                                 pkgmap[mykey]["protected"].add(best_version)
11979                                 pkgmap[mykey]["selected"].update(mypkg for mypkg in mymatch \
11980                                         if mypkg != best_version and mypkg not in all_selected)
11981                                 all_selected.update(pkgmap[mykey]["selected"])
11982                         else:
11983                                 # unmerge_action == "clean"
11984                                 slotmap={}
11985                                 for mypkg in mymatch:
11986                                         if unmerge_action == "clean":
11987                                                 myslot = localtree.getslot(mypkg)
11988                                         else:
11989                                                 # since we're pruning, we don't care about slots
11990                                                 # and put all the pkgs in together
11991                                                 myslot = 0
11992                                         if myslot not in slotmap:
11993                                                 slotmap[myslot] = {}
11994                                         slotmap[myslot][localtree.dbapi.cpv_counter(mypkg)] = mypkg
11995
11996                                 for mypkg in vartree.dbapi.cp_list(
11997                                         portage.dep_getkey(mymatch[0])):
11998                                         myslot = vartree.getslot(mypkg)
11999                                         if myslot not in slotmap:
12000                                                 slotmap[myslot] = {}
12001                                         slotmap[myslot][vartree.dbapi.cpv_counter(mypkg)] = mypkg
12002
12003                                 for myslot in slotmap:
12004                                         counterkeys = slotmap[myslot].keys()
12005                                         if not counterkeys:
12006                                                 continue
12007                                         counterkeys.sort()
12008                                         pkgmap[mykey]["protected"].add(
12009                                                 slotmap[myslot][counterkeys[-1]])
12010                                         del counterkeys[-1]
12011
12012                                         for counter in counterkeys[:]:
12013                                                 mypkg = slotmap[myslot][counter]
12014                                                 if mypkg not in mymatch:
12015                                                         counterkeys.remove(counter)
12016                                                         pkgmap[mykey]["protected"].add(
12017                                                                 slotmap[myslot][counter])
12018
12019                                         #be pretty and get them in order of merge:
12020                                         for ckey in counterkeys:
12021                                                 mypkg = slotmap[myslot][ckey]
12022                                                 if mypkg not in all_selected:
12023                                                         pkgmap[mykey]["selected"].add(mypkg)
12024                                                         all_selected.add(mypkg)
12025                                         # ok, now the last-merged package
12026                                         # is protected, and the rest are selected
12027                 numselected = len(all_selected)
12028                 if global_unmerge and not numselected:
12029                         portage.writemsg_stdout("\n>>> No outdated packages were found on your system.\n")
12030                         return 0
12031         
12032                 if not numselected:
12033                         portage.writemsg_stdout(
12034                                 "\n>>> No packages selected for removal by " + \
12035                                 unmerge_action + "\n")
12036                         return 0
12037         finally:
12038                 if vdb_lock:
12039                         vartree.dbapi.flush_cache()
12040                         portage.locks.unlockdir(vdb_lock)
12041         
12042         from portage.sets.base import EditablePackageSet
12043         
12044         # generate a list of package sets that are directly or indirectly listed in "world",
12045         # as there is no persistent list of "installed" sets
12046         installed_sets = ["world"]
12047         stop = False
12048         pos = 0
12049         while not stop:
12050                 stop = True
12051                 pos = len(installed_sets)
12052                 for s in installed_sets[pos - 1:]:
12053                         if s not in sets:
12054                                 continue
12055                         candidates = [x[len(SETPREFIX):] for x in sets[s].getNonAtoms() if x.startswith(SETPREFIX)]
12056                         if candidates:
12057                                 stop = False
12058                                 installed_sets += candidates
12059         installed_sets = [x for x in installed_sets if x not in root_config.setconfig.active]
12060         del stop, pos
12061
12062         # we don't want to unmerge packages that are still listed in user-editable package sets
12063         # listed in "world" as they would be remerged on the next update of "world" or the 
12064         # relevant package sets.
12065         unknown_sets = set()
12066         for cp in xrange(len(pkgmap)):
12067                 for cpv in pkgmap[cp]["selected"].copy():
12068                         try:
12069                                 pkg = _pkg(cpv)
12070                         except KeyError:
12071                                 # It could have been uninstalled
12072                                 # by a concurrent process.
12073                                 continue
12074
12075                         if unmerge_action != "clean" and \
12076                                 root_config.root == "/" and \
12077                                 portage.match_from_list(
12078                                 portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
12079                                 msg = ("Not unmerging package %s since there is no valid " + \
12080                                 "reason for portage to unmerge itself.") % (pkg.cpv,)
12081                                 for line in textwrap.wrap(msg, 75):
12082                                         out.eerror(line)
12083                                 # adjust pkgmap so the display output is correct
12084                                 pkgmap[cp]["selected"].remove(cpv)
12085                                 all_selected.remove(cpv)
12086                                 pkgmap[cp]["protected"].add(cpv)
12087                                 continue
12088
12089                         parents = []
12090                         for s in installed_sets:
12091                                 # skip sets that the user requested to unmerge, and skip world 
12092                                 # unless we're unmerging a package set (as the package would be 
12093                                 # removed from "world" later on)
12094                                 if s in root_config.setconfig.active or (s == "world" and not root_config.setconfig.active):
12095                                         continue
12096
12097                                 if s not in sets:
12098                                         if s in unknown_sets:
12099                                                 continue
12100                                         unknown_sets.add(s)
12101                                         out = portage.output.EOutput()
12102                                         out.eerror(("Unknown set '@%s' in " + \
12103                                                 "%svar/lib/portage/world_sets") % \
12104                                                 (s, root_config.root))
12105                                         continue
12106
12107                                 # only check instances of EditablePackageSet as other classes are generally used for
12108                                 # special purposes and can be ignored here (and are usually generated dynamically, so the
12109                                 # user can't do much about them anyway)
12110                                 if isinstance(sets[s], EditablePackageSet):
12111
12112                                         # This is derived from a snippet of code in the
12113                                         # depgraph._iter_atoms_for_pkg() method.
12114                                         for atom in sets[s].iterAtomsForPackage(pkg):
12115                                                 inst_matches = vartree.dbapi.match(atom)
12116                                                 inst_matches.reverse() # descending order
12117                                                 higher_slot = None
12118                                                 for inst_cpv in inst_matches:
12119                                                         try:
12120                                                                 inst_pkg = _pkg(inst_cpv)
12121                                                         except KeyError:
12122                                                                 # It could have been uninstalled
12123                                                                 # by a concurrent process.
12124                                                                 continue
12125
12126                                                         if inst_pkg.cp != atom.cp:
12127                                                                 continue
12128                                                         if pkg >= inst_pkg:
12129                                                                 # This is descending order, and we're not
12130                                                                 # interested in any versions <= pkg given.
12131                                                                 break
12132                                                         if pkg.slot_atom != inst_pkg.slot_atom:
12133                                                                 higher_slot = inst_pkg
12134                                                                 break
12135                                                 if higher_slot is None:
12136                                                         parents.append(s)
12137                                                         break
12138                         if parents:
12139                                 #print colorize("WARN", "Package %s is going to be unmerged," % cpv)
12140                                 #print colorize("WARN", "but still listed in the following package sets:")
12141                                 #print "    %s\n" % ", ".join(parents)
12142                                 print colorize("WARN", "Not unmerging package %s as it is" % cpv)
12143                                 print colorize("WARN", "still referenced by the following package sets:")
12144                                 print "    %s\n" % ", ".join(parents)
12145                                 # adjust pkgmap so the display output is correct
12146                                 pkgmap[cp]["selected"].remove(cpv)
12147                                 all_selected.remove(cpv)
12148                                 pkgmap[cp]["protected"].add(cpv)
12149         
12150         del installed_sets
12151
12152         numselected = len(all_selected)
12153         if not numselected:
12154                 writemsg_level(
12155                         "\n>>> No packages selected for removal by " + \
12156                         unmerge_action + "\n")
12157                 return 0
12158
12159         # Unmerge order only matters in some cases
12160         if not ordered:
12161                 unordered = {}
12162                 for d in pkgmap:
12163                         selected = d["selected"]
12164                         if not selected:
12165                                 continue
12166                         cp = portage.cpv_getkey(iter(selected).next())
12167                         cp_dict = unordered.get(cp)
12168                         if cp_dict is None:
12169                                 cp_dict = {}
12170                                 unordered[cp] = cp_dict
12171                                 for k in d:
12172                                         cp_dict[k] = set()
12173                         for k, v in d.iteritems():
12174                                 cp_dict[k].update(v)
12175                 pkgmap = [unordered[cp] for cp in sorted(unordered)]
12176
12177         for x in xrange(len(pkgmap)):
12178                 selected = pkgmap[x]["selected"]
12179                 if not selected:
12180                         continue
12181                 for mytype, mylist in pkgmap[x].iteritems():
12182                         if mytype == "selected":
12183                                 continue
12184                         mylist.difference_update(all_selected)
12185                 cp = portage.cpv_getkey(iter(selected).next())
12186                 for y in localtree.dep_match(cp):
12187                         if y not in pkgmap[x]["omitted"] and \
12188                                 y not in pkgmap[x]["selected"] and \
12189                                 y not in pkgmap[x]["protected"] and \
12190                                 y not in all_selected:
12191                                 pkgmap[x]["omitted"].add(y)
12192                 if global_unmerge and not pkgmap[x]["selected"]:
12193                         #avoid cluttering the preview printout with stuff that isn't getting unmerged
12194                         continue
12195                 if not (pkgmap[x]["protected"] or pkgmap[x]["omitted"]) and cp in syslist:
12196                         writemsg_level(colorize("BAD","\a\n\n!!! " + \
12197                                 "'%s' is part of your system profile.\n" % cp),
12198                                 level=logging.WARNING, noiselevel=-1)
12199                         writemsg_level(colorize("WARN","\a!!! Unmerging it may " + \
12200                                 "be damaging to your system.\n\n"),
12201                                 level=logging.WARNING, noiselevel=-1)
12202                         if clean_delay and "--pretend" not in myopts and "--ask" not in myopts:
12203                                 countdown(int(settings["EMERGE_WARNING_DELAY"]),
12204                                         colorize("UNMERGE_WARN", "Press Ctrl-C to Stop"))
12205                 if not quiet:
12206                         writemsg_level("\n %s\n" % (bold(cp),), noiselevel=-1)
12207                 else:
12208                         writemsg_level(bold(cp) + ": ", noiselevel=-1)
12209                 for mytype in ["selected","protected","omitted"]:
12210                         if not quiet:
12211                                 writemsg_level((mytype + ": ").rjust(14), noiselevel=-1)
12212                         if pkgmap[x][mytype]:
12213                                 sorted_pkgs = [portage.catpkgsplit(mypkg)[1:] for mypkg in pkgmap[x][mytype]]
12214                                 sorted_pkgs.sort(key=cmp_sort_key(portage.pkgcmp))
12215                                 for pn, ver, rev in sorted_pkgs:
12216                                         if rev == "r0":
12217                                                 myversion = ver
12218                                         else:
12219                                                 myversion = ver + "-" + rev
12220                                         if mytype == "selected":
12221                                                 writemsg_level(
12222                                                         colorize("UNMERGE_WARN", myversion + " "),
12223                                                         noiselevel=-1)
12224                                         else:
12225                                                 writemsg_level(
12226                                                         colorize("GOOD", myversion + " "), noiselevel=-1)
12227                         else:
12228                                 writemsg_level("none ", noiselevel=-1)
12229                         if not quiet:
12230                                 writemsg_level("\n", noiselevel=-1)
12231                 if quiet:
12232                         writemsg_level("\n", noiselevel=-1)
12233
12234         writemsg_level("\n>>> " + colorize("UNMERGE_WARN", "'Selected'") + \
12235                 " packages are slated for removal.\n")
12236         writemsg_level(">>> " + colorize("GOOD", "'Protected'") + \
12237                         " and " + colorize("GOOD", "'omitted'") + \
12238                         " packages will not be removed.\n\n")
12239
12240         if "--pretend" in myopts:
12241                 #we're done... return
12242                 return 0
12243         if "--ask" in myopts:
12244                 if userquery("Would you like to unmerge these packages?")=="No":
12245                         # enter pretend mode for correct formatting of results
12246                         myopts["--pretend"] = True
12247                         print
12248                         print "Quitting."
12249                         print
12250                         return 0
12251         #the real unmerging begins, after a short delay....
12252         if clean_delay and not autoclean:
12253                 countdown(int(settings["CLEAN_DELAY"]), ">>> Unmerging")
12254
12255         for x in xrange(len(pkgmap)):
12256                 for y in pkgmap[x]["selected"]:
12257                         writemsg_level(">>> Unmerging "+y+"...\n", noiselevel=-1)
12258                         emergelog(xterm_titles, "=== Unmerging... ("+y+")")
12259                         mysplit = y.split("/")
12260                         #unmerge...
12261                         retval = portage.unmerge(mysplit[0], mysplit[1], settings["ROOT"],
12262                                 mysettings, unmerge_action not in ["clean","prune"],
12263                                 vartree=vartree, ldpath_mtimes=ldpath_mtimes,
12264                                 scheduler=scheduler)
12265
12266                         if retval != os.EX_OK:
12267                                 emergelog(xterm_titles, " !!! unmerge FAILURE: "+y)
12268                                 if raise_on_error:
12269                                         raise UninstallFailure(retval)
12270                                 sys.exit(retval)
12271                         else:
12272                                 if clean_world and hasattr(sets["world"], "cleanPackage"):
12273                                         sets["world"].cleanPackage(vartree.dbapi, y)
12274                                 emergelog(xterm_titles, " >>> unmerge success: "+y)
12275         if clean_world and hasattr(sets["world"], "remove"):
12276                 for s in root_config.setconfig.active:
12277                         sets["world"].remove(SETPREFIX+s)
12278         return 1
12279
12280 def chk_updated_info_files(root, infodirs, prev_mtimes, retval):
12281
12282         if os.path.exists("/usr/bin/install-info"):
12283                 out = portage.output.EOutput()
12284                 regen_infodirs=[]
12285                 for z in infodirs:
12286                         if z=='':
12287                                 continue
12288                         inforoot=normpath(root+z)
12289                         if os.path.isdir(inforoot):
12290                                 infomtime = long(os.stat(inforoot).st_mtime)
12291                                 if inforoot not in prev_mtimes or \
12292                                         prev_mtimes[inforoot] != infomtime:
12293                                                 regen_infodirs.append(inforoot)
12294
12295                 if not regen_infodirs:
12296                         portage.writemsg_stdout("\n")
12297                         out.einfo("GNU info directory index is up-to-date.")
12298                 else:
12299                         portage.writemsg_stdout("\n")
12300                         out.einfo("Regenerating GNU info directory index...")
12301
12302                         dir_extensions = ("", ".gz", ".bz2")
12303                         icount=0
12304                         badcount=0
12305                         errmsg = ""
12306                         for inforoot in regen_infodirs:
12307                                 if inforoot=='':
12308                                         continue
12309
12310                                 if not os.path.isdir(inforoot) or \
12311                                         not os.access(inforoot, os.W_OK):
12312                                         continue
12313
12314                                 file_list = os.listdir(inforoot)
12315                                 file_list.sort()
12316                                 dir_file = os.path.join(inforoot, "dir")
12317                                 moved_old_dir = False
12318                                 processed_count = 0
12319                                 for x in file_list:
12320                                         if x.startswith(".") or \
12321                                                 os.path.isdir(os.path.join(inforoot, x)):
12322                                                 continue
12323                                         if x.startswith("dir"):
12324                                                 skip = False
12325                                                 for ext in dir_extensions:
12326                                                         if x == "dir" + ext or \
12327                                                                 x == "dir" + ext + ".old":
12328                                                                 skip = True
12329                                                                 break
12330                                                 if skip:
12331                                                         continue
12332                                         if processed_count == 0:
12333                                                 for ext in dir_extensions:
12334                                                         try:
12335                                                                 os.rename(dir_file + ext, dir_file + ext + ".old")
12336                                                                 moved_old_dir = True
12337                                                         except EnvironmentError, e:
12338                                                                 if e.errno != errno.ENOENT:
12339                                                                         raise
12340                                                                 del e
12341                                         processed_count += 1
12342                                         myso=commands.getstatusoutput("LANG=C LANGUAGE=C /usr/bin/install-info --dir-file="+inforoot+"/dir "+inforoot+"/"+x)[1]
12343                                         existsstr="already exists, for file `"
12344                                         if myso!="":
12345                                                 if re.search(existsstr,myso):
12346                                                         # Already exists... Don't increment the count for this.
12347                                                         pass
12348                                                 elif myso[:44]=="install-info: warning: no info dir entry in ":
12349                                                         # This info file doesn't contain a DIR-header: install-info produces this
12350                                                         # (harmless) warning (the --quiet switch doesn't seem to work).
12351                                                         # Don't increment the count for this.
12352                                                         pass
12353                                                 else:
12354                                                         badcount=badcount+1
12355                                                         errmsg += myso + "\n"
12356                                         icount=icount+1
12357
12358                                 if moved_old_dir and not os.path.exists(dir_file):
12359                                         # We didn't generate a new dir file, so put the old file
12360                                         # back where it was originally found.
12361                                         for ext in dir_extensions:
12362                                                 try:
12363                                                         os.rename(dir_file + ext + ".old", dir_file + ext)
12364                                                 except EnvironmentError, e:
12365                                                         if e.errno != errno.ENOENT:
12366                                                                 raise
12367                                                         del e
12368
12369                                 # Clean dir.old cruft so that they don't prevent
12370                                 # unmerge of otherwise empty directories.
12371                                 for ext in dir_extensions:
12372                                         try:
12373                                                 os.unlink(dir_file + ext + ".old")
12374                                         except EnvironmentError, e:
12375                                                 if e.errno != errno.ENOENT:
12376                                                         raise
12377                                                 del e
12378
12379                                 #update mtime so we can potentially avoid regenerating.
12380                                 prev_mtimes[inforoot] = long(os.stat(inforoot).st_mtime)
12381
12382                         if badcount:
12383                                 out.eerror("Processed %d info files; %d errors." % \
12384                                         (icount, badcount))
12385                                 writemsg_level(errmsg, level=logging.ERROR, noiselevel=-1)
12386                         else:
12387                                 if icount > 0:
12388                                         out.einfo("Processed %d info files." % (icount,))
12389
12390
12391 def display_news_notification(root_config, myopts):
12392         target_root = root_config.root
12393         trees = root_config.trees
12394         settings = trees["vartree"].settings
12395         portdb = trees["porttree"].dbapi
12396         vardb = trees["vartree"].dbapi
12397         NEWS_PATH = os.path.join("metadata", "news")
12398         UNREAD_PATH = os.path.join(target_root, NEWS_LIB_PATH, "news")
12399         newsReaderDisplay = False
12400         update = "--pretend" not in myopts
12401
12402         for repo in portdb.getRepositories():
12403                 unreadItems = checkUpdatedNewsItems(
12404                         portdb, vardb, NEWS_PATH, UNREAD_PATH, repo, update=update)
12405                 if unreadItems:
12406                         if not newsReaderDisplay:
12407                                 newsReaderDisplay = True
12408                                 print
12409                         print colorize("WARN", " * IMPORTANT:"),
12410                         print "%s news items need reading for repository '%s'." % (unreadItems, repo)
12411                         
12412         
12413         if newsReaderDisplay:
12414                 print colorize("WARN", " *"),
12415                 print "Use " + colorize("GOOD", "eselect news") + " to read news items."
12416                 print
12417
12418 def display_preserved_libs(vardbapi):
12419         MAX_DISPLAY = 3
12420
12421         # Ensure the registry is consistent with existing files.
12422         vardbapi.plib_registry.pruneNonExisting()
12423
12424         if vardbapi.plib_registry.hasEntries():
12425                 print
12426                 print colorize("WARN", "!!!") + " existing preserved libs:"
12427                 plibdata = vardbapi.plib_registry.getPreservedLibs()
12428                 linkmap = vardbapi.linkmap
12429                 consumer_map = {}
12430                 owners = {}
12431                 linkmap_broken = False
12432
12433                 try:
12434                         linkmap.rebuild()
12435                 except portage.exception.CommandNotFound, e:
12436                         writemsg_level("!!! Command Not Found: %s\n" % (e,),
12437                                 level=logging.ERROR, noiselevel=-1)
12438                         del e
12439                         linkmap_broken = True
12440                 else:
12441                         search_for_owners = set()
12442                         for cpv in plibdata:
12443                                 internal_plib_keys = set(linkmap._obj_key(f) \
12444                                         for f in plibdata[cpv])
12445                                 for f in plibdata[cpv]:
12446                                         if f in consumer_map:
12447                                                 continue
12448                                         consumers = []
12449                                         for c in linkmap.findConsumers(f):
12450                                                 # Filter out any consumers that are also preserved libs
12451                                                 # belonging to the same package as the provider.
12452                                                 if linkmap._obj_key(c) not in internal_plib_keys:
12453                                                         consumers.append(c)
12454                                         consumers.sort()
12455                                         consumer_map[f] = consumers
12456                                         search_for_owners.update(consumers[:MAX_DISPLAY+1])
12457
12458                         owners = vardbapi._owners.getFileOwnerMap(search_for_owners)
12459
12460                 for cpv in plibdata:
12461                         print colorize("WARN", ">>>") + " package: %s" % cpv
12462                         samefile_map = {}
12463                         for f in plibdata[cpv]:
12464                                 obj_key = linkmap._obj_key(f)
12465                                 alt_paths = samefile_map.get(obj_key)
12466                                 if alt_paths is None:
12467                                         alt_paths = set()
12468                                         samefile_map[obj_key] = alt_paths
12469                                 alt_paths.add(f)
12470
12471                         for alt_paths in samefile_map.itervalues():
12472                                 alt_paths = sorted(alt_paths)
12473                                 for p in alt_paths:
12474                                         print colorize("WARN", " * ") + " - %s" % (p,)
12475                                 f = alt_paths[0]
12476                                 consumers = consumer_map.get(f, [])
12477                                 for c in consumers[:MAX_DISPLAY]:
12478                                         print colorize("WARN", " * ") + "     used by %s (%s)" % \
12479                                                 (c, ", ".join(x.mycpv for x in owners.get(c, [])))
12480                                 if len(consumers) == MAX_DISPLAY + 1:
12481                                         print colorize("WARN", " * ") + "     used by %s (%s)" % \
12482                                                 (consumers[MAX_DISPLAY], ", ".join(x.mycpv \
12483                                                 for x in owners.get(consumers[MAX_DISPLAY], [])))
12484                                 elif len(consumers) > MAX_DISPLAY:
12485                                         print colorize("WARN", " * ") + "     used by %d other files" % (len(consumers) - MAX_DISPLAY)
12486                 print "Use " + colorize("GOOD", "emerge @preserved-rebuild") + " to rebuild packages using these libraries"
12487
12488
12489 def _flush_elog_mod_echo():
12490         """
12491         Dump the mod_echo output now so that our other
12492         notifications are shown last.
12493         @rtype: bool
12494         @returns: True if messages were shown, False otherwise.
12495         """
12496         messages_shown = False
12497         try:
12498                 from portage.elog import mod_echo
12499         except ImportError:
12500                 pass # happens during downgrade to a version without the module
12501         else:
12502                 messages_shown = bool(mod_echo._items)
12503                 mod_echo.finalize()
12504         return messages_shown
12505
12506 def post_emerge(root_config, myopts, mtimedb, retval):
12507         """
12508         Misc. things to run at the end of a merge session.
12509         
12510         Update Info Files
12511         Update Config Files
12512         Update News Items
12513         Commit mtimeDB
12514         Display preserved libs warnings
12515         Exit Emerge
12516
12517         @param trees: A dictionary mapping each ROOT to it's package databases
12518         @type trees: dict
12519         @param mtimedb: The mtimeDB to store data needed across merge invocations
12520         @type mtimedb: MtimeDB class instance
12521         @param retval: Emerge's return value
12522         @type retval: Int
12523         @rype: None
12524         @returns:
12525         1.  Calls sys.exit(retval)
12526         """
12527
12528         target_root = root_config.root
12529         trees = { target_root : root_config.trees }
12530         vardbapi = trees[target_root]["vartree"].dbapi
12531         settings = vardbapi.settings
12532         info_mtimes = mtimedb["info"]
12533
12534         # Load the most current variables from ${ROOT}/etc/profile.env
12535         settings.unlock()
12536         settings.reload()
12537         settings.regenerate()
12538         settings.lock()
12539
12540         config_protect = settings.get("CONFIG_PROTECT","").split()
12541         infodirs = settings.get("INFOPATH","").split(":") + \
12542                 settings.get("INFODIR","").split(":")
12543
12544         os.chdir("/")
12545
12546         if retval == os.EX_OK:
12547                 exit_msg = " *** exiting successfully."
12548         else:
12549                 exit_msg = " *** exiting unsuccessfully with status '%s'." % retval
12550         emergelog("notitles" not in settings.features, exit_msg)
12551
12552         _flush_elog_mod_echo()
12553
12554         counter_hash = settings.get("PORTAGE_COUNTER_HASH")
12555         if "--pretend" in myopts or (counter_hash is not None and \
12556                 counter_hash == vardbapi._counter_hash()):
12557                 display_news_notification(root_config, myopts)
12558                 # If vdb state has not changed then there's nothing else to do.
12559                 sys.exit(retval)
12560
12561         vdb_path = os.path.join(target_root, portage.VDB_PATH)
12562         portage.util.ensure_dirs(vdb_path)
12563         vdb_lock = None
12564         if os.access(vdb_path, os.W_OK) and not "--pretend" in myopts:
12565                 vdb_lock = portage.locks.lockdir(vdb_path)
12566
12567         if vdb_lock:
12568                 try:
12569                         if "noinfo" not in settings.features:
12570                                 chk_updated_info_files(target_root,
12571                                         infodirs, info_mtimes, retval)
12572                         mtimedb.commit()
12573                 finally:
12574                         if vdb_lock:
12575                                 portage.locks.unlockdir(vdb_lock)
12576
12577         chk_updated_cfg_files(target_root, config_protect)
12578         
12579         display_news_notification(root_config, myopts)
12580         if retval in (None, os.EX_OK) or (not "--pretend" in myopts):
12581                 display_preserved_libs(vardbapi)        
12582
12583         sys.exit(retval)
12584
12585
12586 def chk_updated_cfg_files(target_root, config_protect):
12587         if config_protect:
12588                 #number of directories with some protect files in them
12589                 procount=0
12590                 for x in config_protect:
12591                         x = os.path.join(target_root, x.lstrip(os.path.sep))
12592                         if not os.access(x, os.W_OK):
12593                                 # Avoid Permission denied errors generated
12594                                 # later by `find`.
12595                                 continue
12596                         try:
12597                                 mymode = os.lstat(x).st_mode
12598                         except OSError:
12599                                 continue
12600                         if stat.S_ISLNK(mymode):
12601                                 # We want to treat it like a directory if it
12602                                 # is a symlink to an existing directory.
12603                                 try:
12604                                         real_mode = os.stat(x).st_mode
12605                                         if stat.S_ISDIR(real_mode):
12606                                                 mymode = real_mode
12607                                 except OSError:
12608                                         pass
12609                         if stat.S_ISDIR(mymode):
12610                                 mycommand = "find '%s' -name '.*' -type d -prune -o -name '._cfg????_*'" % x
12611                         else:
12612                                 mycommand = "find '%s' -maxdepth 1 -name '._cfg????_%s'" % \
12613                                         os.path.split(x.rstrip(os.path.sep))
12614                         mycommand += " ! -name '.*~' ! -iname '.*.bak' -print0"
12615                         a = commands.getstatusoutput(mycommand)
12616                         if a[0] != 0:
12617                                 sys.stderr.write(" %s error scanning '%s': " % (bad("*"), x))
12618                                 sys.stderr.flush()
12619                                 # Show the error message alone, sending stdout to /dev/null.
12620                                 os.system(mycommand + " 1>/dev/null")
12621                         else:
12622                                 files = a[1].split('\0')
12623                                 # split always produces an empty string as the last element
12624                                 if files and not files[-1]:
12625                                         del files[-1]
12626                                 if files:
12627                                         procount += 1
12628                                         print "\n"+colorize("WARN", " * IMPORTANT:"),
12629                                         if stat.S_ISDIR(mymode):
12630                                                  print "%d config files in '%s' need updating." % \
12631                                                         (len(files), x)
12632                                         else:
12633                                                  print "config file '%s' needs updating." % x
12634
12635                 if procount:
12636                         print " "+yellow("*")+" See the "+colorize("INFORM","CONFIGURATION FILES")+ \
12637                                 " section of the " + bold("emerge")
12638                         print " "+yellow("*")+" man page to learn how to update config files."
12639
12640 def checkUpdatedNewsItems(portdb, vardb, NEWS_PATH, UNREAD_PATH, repo_id,
12641         update=False):
12642         """
12643         Examines news items in repodir + '/' + NEWS_PATH and attempts to find unread items
12644         Returns the number of unread (yet relevent) items.
12645         
12646         @param portdb: a portage tree database
12647         @type portdb: pordbapi
12648         @param vardb: an installed package database
12649         @type vardb: vardbapi
12650         @param NEWS_PATH:
12651         @type NEWS_PATH:
12652         @param UNREAD_PATH:
12653         @type UNREAD_PATH:
12654         @param repo_id:
12655         @type repo_id:
12656         @rtype: Integer
12657         @returns:
12658         1.  The number of unread but relevant news items.
12659         
12660         """
12661         from portage.news import NewsManager
12662         manager = NewsManager(portdb, vardb, NEWS_PATH, UNREAD_PATH)
12663         return manager.getUnreadItems( repo_id, update=update )
12664
12665 def insert_category_into_atom(atom, category):
12666         alphanum = re.search(r'\w', atom)
12667         if alphanum:
12668                 ret = atom[:alphanum.start()] + "%s/" % category + \
12669                         atom[alphanum.start():]
12670         else:
12671                 ret = None
12672         return ret
12673
12674 def is_valid_package_atom(x):
12675         if "/" not in x:
12676                 alphanum = re.search(r'\w', x)
12677                 if alphanum:
12678                         x = x[:alphanum.start()] + "cat/" + x[alphanum.start():]
12679         return portage.isvalidatom(x)
12680
12681 def show_blocker_docs_link():
12682         print
12683         print "For more information about " + bad("Blocked Packages") + ", please refer to the following"
12684         print "section of the Gentoo Linux x86 Handbook (architecture is irrelevant):"
12685         print
12686         print "http://www.gentoo.org/doc/en/handbook/handbook-x86.xml?full=1#blocked"
12687         print
12688
12689 def show_mask_docs():
12690         print "For more information, see the MASKED PACKAGES section in the emerge"
12691         print "man page or refer to the Gentoo Handbook."
12692
12693 def action_sync(settings, trees, mtimedb, myopts, myaction):
12694         xterm_titles = "notitles" not in settings.features
12695         emergelog(xterm_titles, " === sync")
12696         myportdir = settings.get("PORTDIR", None)
12697         out = portage.output.EOutput()
12698         if not myportdir:
12699                 sys.stderr.write("!!! PORTDIR is undefined.  Is /etc/make.globals missing?\n")
12700                 sys.exit(1)
12701         if myportdir[-1]=="/":
12702                 myportdir=myportdir[:-1]
12703         try:
12704                 st = os.stat(myportdir)
12705         except OSError:
12706                 st = None
12707         if st is None:
12708                 print ">>>",myportdir,"not found, creating it."
12709                 os.makedirs(myportdir,0755)
12710                 st = os.stat(myportdir)
12711
12712         spawn_kwargs = {}
12713         spawn_kwargs["env"] = settings.environ()
12714         if 'usersync' in settings.features and \
12715                 portage.data.secpass >= 2 and \
12716                 (st.st_uid != os.getuid() and st.st_mode & 0700 or \
12717                 st.st_gid != os.getgid() and st.st_mode & 0070):
12718                 try:
12719                         homedir = pwd.getpwuid(st.st_uid).pw_dir
12720                 except KeyError:
12721                         pass
12722                 else:
12723                         # Drop privileges when syncing, in order to match
12724                         # existing uid/gid settings.
12725                         spawn_kwargs["uid"]    = st.st_uid
12726                         spawn_kwargs["gid"]    = st.st_gid
12727                         spawn_kwargs["groups"] = [st.st_gid]
12728                         spawn_kwargs["env"]["HOME"] = homedir
12729                         umask = 0002
12730                         if not st.st_mode & 0020:
12731                                 umask = umask | 0020
12732                         spawn_kwargs["umask"] = umask
12733
12734         syncuri = settings.get("SYNC", "").strip()
12735         if not syncuri:
12736                 writemsg_level("!!! SYNC is undefined. Is /etc/make.globals missing?\n",
12737                         noiselevel=-1, level=logging.ERROR)
12738                 return 1
12739
12740         vcs_dirs = frozenset([".git", ".svn", "CVS", ".hg"])
12741         vcs_dirs = vcs_dirs.intersection(os.listdir(myportdir))
12742
12743         os.umask(0022)
12744         dosyncuri = syncuri
12745         updatecache_flg = False
12746         if myaction == "metadata":
12747                 print "skipping sync"
12748                 updatecache_flg = True
12749         elif ".git" in vcs_dirs:
12750                 # Update existing git repository, and ignore the syncuri. We are
12751                 # going to trust the user and assume that the user is in the branch
12752                 # that he/she wants updated. We'll let the user manage branches with
12753                 # git directly.
12754                 if portage.process.find_binary("git") is None:
12755                         msg = ["Command not found: git",
12756                         "Type \"emerge dev-util/git\" to enable git support."]
12757                         for l in msg:
12758                                 writemsg_level("!!! %s\n" % l,
12759                                         level=logging.ERROR, noiselevel=-1)
12760                         return 1
12761                 msg = ">>> Starting git pull in %s..." % myportdir
12762                 emergelog(xterm_titles, msg )
12763                 writemsg_level(msg + "\n")
12764                 exitcode = portage.process.spawn_bash("cd %s ; git pull" % \
12765                         (portage._shell_quote(myportdir),), **spawn_kwargs)
12766                 if exitcode != os.EX_OK:
12767                         msg = "!!! git pull error in %s." % myportdir
12768                         emergelog(xterm_titles, msg)
12769                         writemsg_level(msg + "\n", level=logging.ERROR, noiselevel=-1)
12770                         return exitcode
12771                 msg = ">>> Git pull in %s successful" % myportdir
12772                 emergelog(xterm_titles, msg)
12773                 writemsg_level(msg + "\n")
12774                 exitcode = git_sync_timestamps(settings, myportdir)
12775                 if exitcode == os.EX_OK:
12776                         updatecache_flg = True
12777         elif syncuri[:8]=="rsync://":
12778                 for vcs_dir in vcs_dirs:
12779                         writemsg_level(("!!! %s appears to be under revision " + \
12780                                 "control (contains %s).\n!!! Aborting rsync sync.\n") % \
12781                                 (myportdir, vcs_dir), level=logging.ERROR, noiselevel=-1)
12782                         return 1
12783                 if not os.path.exists("/usr/bin/rsync"):
12784                         print "!!! /usr/bin/rsync does not exist, so rsync support is disabled."
12785                         print "!!! Type \"emerge net-misc/rsync\" to enable rsync support."
12786                         sys.exit(1)
12787                 mytimeout=180
12788
12789                 rsync_opts = []
12790                 if settings["PORTAGE_RSYNC_OPTS"] == "":
12791                         portage.writemsg("PORTAGE_RSYNC_OPTS empty or unset, using hardcoded defaults\n")
12792                         rsync_opts.extend([
12793                                 "--recursive",    # Recurse directories
12794                                 "--links",        # Consider symlinks
12795                                 "--safe-links",   # Ignore links outside of tree
12796                                 "--perms",        # Preserve permissions
12797                                 "--times",        # Preserive mod times
12798                                 "--compress",     # Compress the data transmitted
12799                                 "--force",        # Force deletion on non-empty dirs
12800                                 "--whole-file",   # Don't do block transfers, only entire files
12801                                 "--delete",       # Delete files that aren't in the master tree
12802                                 "--stats",        # Show final statistics about what was transfered
12803                                 "--timeout="+str(mytimeout), # IO timeout if not done in X seconds
12804                                 "--exclude=/distfiles",   # Exclude distfiles from consideration
12805                                 "--exclude=/local",       # Exclude local     from consideration
12806                                 "--exclude=/packages",    # Exclude packages  from consideration
12807                         ])
12808
12809                 else:
12810                         # The below validation is not needed when using the above hardcoded
12811                         # defaults.
12812
12813                         portage.writemsg("Using PORTAGE_RSYNC_OPTS instead of hardcoded defaults\n", 1)
12814                         rsync_opts.extend(
12815                                 shlex.split(settings.get("PORTAGE_RSYNC_OPTS","")))
12816                         for opt in ("--recursive", "--times"):
12817                                 if opt not in rsync_opts:
12818                                         portage.writemsg(yellow("WARNING:") + " adding required option " + \
12819                                         "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
12820                                         rsync_opts.append(opt)
12821         
12822                         for exclude in ("distfiles", "local", "packages"):
12823                                 opt = "--exclude=/%s" % exclude
12824                                 if opt not in rsync_opts:
12825                                         portage.writemsg(yellow("WARNING:") + \
12826                                         " adding required option %s not included in "  % opt + \
12827                                         "PORTAGE_RSYNC_OPTS (can be overridden with --exclude='!')\n")
12828                                         rsync_opts.append(opt)
12829         
12830                         if syncuri.rstrip("/").endswith(".gentoo.org/gentoo-portage"):
12831                                 def rsync_opt_startswith(opt_prefix):
12832                                         for x in rsync_opts:
12833                                                 if x.startswith(opt_prefix):
12834                                                         return True
12835                                         return False
12836
12837                                 if not rsync_opt_startswith("--timeout="):
12838                                         rsync_opts.append("--timeout=%d" % mytimeout)
12839
12840                                 for opt in ("--compress", "--whole-file"):
12841                                         if opt not in rsync_opts:
12842                                                 portage.writemsg(yellow("WARNING:") + " adding required option " + \
12843                                                 "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
12844                                                 rsync_opts.append(opt)
12845
12846                 if "--quiet" in myopts:
12847                         rsync_opts.append("--quiet")    # Shut up a lot
12848                 else:
12849                         rsync_opts.append("--verbose")  # Print filelist
12850
12851                 if "--verbose" in myopts:
12852                         rsync_opts.append("--progress")  # Progress meter for each file
12853
12854                 if "--debug" in myopts:
12855                         rsync_opts.append("--checksum") # Force checksum on all files
12856
12857                 # Real local timestamp file.
12858                 servertimestampfile = os.path.join(
12859                         myportdir, "metadata", "timestamp.chk")
12860
12861                 content = portage.util.grabfile(servertimestampfile)
12862                 mytimestamp = 0
12863                 if content:
12864                         try:
12865                                 mytimestamp = time.mktime(time.strptime(content[0],
12866                                         "%a, %d %b %Y %H:%M:%S +0000"))
12867                         except (OverflowError, ValueError):
12868                                 pass
12869                 del content
12870
12871                 try:
12872                         rsync_initial_timeout = \
12873                                 int(settings.get("PORTAGE_RSYNC_INITIAL_TIMEOUT", "15"))
12874                 except ValueError:
12875                         rsync_initial_timeout = 15
12876
12877                 try:
12878                         maxretries=int(settings["PORTAGE_RSYNC_RETRIES"])
12879                 except SystemExit, e:
12880                         raise # Needed else can't exit
12881                 except:
12882                         maxretries=3 #default number of retries
12883
12884                 retries=0
12885                 user_name, hostname, port = re.split(
12886                         "rsync://([^:/]+@)?([^:/]*)(:[0-9]+)?", syncuri, maxsplit=3)[1:4]
12887                 if port is None:
12888                         port=""
12889                 if user_name is None:
12890                         user_name=""
12891                 updatecache_flg=True
12892                 all_rsync_opts = set(rsync_opts)
12893                 extra_rsync_opts = shlex.split(
12894                         settings.get("PORTAGE_RSYNC_EXTRA_OPTS",""))
12895                 all_rsync_opts.update(extra_rsync_opts)
12896                 family = socket.AF_INET
12897                 if "-4" in all_rsync_opts or "--ipv4" in all_rsync_opts:
12898                         family = socket.AF_INET
12899                 elif socket.has_ipv6 and \
12900                         ("-6" in all_rsync_opts or "--ipv6" in all_rsync_opts):
12901                         family = socket.AF_INET6
12902                 ips=[]
12903                 SERVER_OUT_OF_DATE = -1
12904                 EXCEEDED_MAX_RETRIES = -2
12905                 while (1):
12906                         if ips:
12907                                 del ips[0]
12908                         if ips==[]:
12909                                 try:
12910                                         for addrinfo in socket.getaddrinfo(
12911                                                 hostname, None, family, socket.SOCK_STREAM):
12912                                                 if socket.has_ipv6 and addrinfo[0] == socket.AF_INET6:
12913                                                         # IPv6 addresses need to be enclosed in square brackets
12914                                                         ips.append("[%s]" % addrinfo[4][0])
12915                                                 else:
12916                                                         ips.append(addrinfo[4][0])
12917                                         from random import shuffle
12918                                         shuffle(ips)
12919                                 except SystemExit, e:
12920                                         raise # Needed else can't exit
12921                                 except Exception, e:
12922                                         print "Notice:",str(e)
12923                                         dosyncuri=syncuri
12924
12925                         if ips:
12926                                 try:
12927                                         dosyncuri = syncuri.replace(
12928                                                 "//" + user_name + hostname + port + "/",
12929                                                 "//" + user_name + ips[0] + port + "/", 1)
12930                                 except SystemExit, e:
12931                                         raise # Needed else can't exit
12932                                 except Exception, e:
12933                                         print "Notice:",str(e)
12934                                         dosyncuri=syncuri
12935
12936                         if (retries==0):
12937                                 if "--ask" in myopts:
12938                                         if userquery("Do you want to sync your Portage tree with the mirror at\n" + blue(dosyncuri) + bold("?"))=="No":
12939                                                 print
12940                                                 print "Quitting."
12941                                                 print
12942                                                 sys.exit(0)
12943                                 emergelog(xterm_titles, ">>> Starting rsync with " + dosyncuri)
12944                                 if "--quiet" not in myopts:
12945                                         print ">>> Starting rsync with "+dosyncuri+"..."
12946                         else:
12947                                 emergelog(xterm_titles,
12948                                         ">>> Starting retry %d of %d with %s" % \
12949                                                 (retries,maxretries,dosyncuri))
12950                                 print "\n\n>>> Starting retry %d of %d with %s" % (retries,maxretries,dosyncuri)
12951
12952                         if mytimestamp != 0 and "--quiet" not in myopts:
12953                                 print ">>> Checking server timestamp ..."
12954
12955                         rsynccommand = ["/usr/bin/rsync"] + rsync_opts + extra_rsync_opts
12956
12957                         if "--debug" in myopts:
12958                                 print rsynccommand
12959
12960                         exitcode = os.EX_OK
12961                         servertimestamp = 0
12962                         # Even if there's no timestamp available locally, fetch the
12963                         # timestamp anyway as an initial probe to verify that the server is
12964                         # responsive.  This protects us from hanging indefinitely on a
12965                         # connection attempt to an unresponsive server which rsync's
12966                         # --timeout option does not prevent.
12967                         if True:
12968                                 # Temporary file for remote server timestamp comparison.
12969                                 from tempfile import mkstemp
12970                                 fd, tmpservertimestampfile = mkstemp()
12971                                 os.close(fd)
12972                                 mycommand = rsynccommand[:]
12973                                 mycommand.append(dosyncuri.rstrip("/") + \
12974                                         "/metadata/timestamp.chk")
12975                                 mycommand.append(tmpservertimestampfile)
12976                                 content = None
12977                                 mypids = []
12978                                 try:
12979                                         def timeout_handler(signum, frame):
12980                                                 raise portage.exception.PortageException("timed out")
12981                                         signal.signal(signal.SIGALRM, timeout_handler)
12982                                         # Timeout here in case the server is unresponsive.  The
12983                                         # --timeout rsync option doesn't apply to the initial
12984                                         # connection attempt.
12985                                         if rsync_initial_timeout:
12986                                                 signal.alarm(rsync_initial_timeout)
12987                                         try:
12988                                                 mypids.extend(portage.process.spawn(
12989                                                         mycommand, env=settings.environ(), returnpid=True))
12990                                                 exitcode = os.waitpid(mypids[0], 0)[1]
12991                                                 content = portage.grabfile(tmpservertimestampfile)
12992                                         finally:
12993                                                 if rsync_initial_timeout:
12994                                                         signal.alarm(0)
12995                                                 try:
12996                                                         os.unlink(tmpservertimestampfile)
12997                                                 except OSError:
12998                                                         pass
12999                                 except portage.exception.PortageException, e:
13000                                         # timed out
13001                                         print e
13002                                         del e
13003                                         if mypids and os.waitpid(mypids[0], os.WNOHANG) == (0,0):
13004                                                 os.kill(mypids[0], signal.SIGTERM)
13005                                                 os.waitpid(mypids[0], 0)
13006                                         # This is the same code rsync uses for timeout.
13007                                         exitcode = 30
13008                                 else:
13009                                         if exitcode != os.EX_OK:
13010                                                 if exitcode & 0xff:
13011                                                         exitcode = (exitcode & 0xff) << 8
13012                                                 else:
13013                                                         exitcode = exitcode >> 8
13014                                 if mypids:
13015                                         portage.process.spawned_pids.remove(mypids[0])
13016                                 if content:
13017                                         try:
13018                                                 servertimestamp = time.mktime(time.strptime(
13019                                                         content[0], "%a, %d %b %Y %H:%M:%S +0000"))
13020                                         except (OverflowError, ValueError):
13021                                                 pass
13022                                 del mycommand, mypids, content
13023                         if exitcode == os.EX_OK:
13024                                 if (servertimestamp != 0) and (servertimestamp == mytimestamp):
13025                                         emergelog(xterm_titles,
13026                                                 ">>> Cancelling sync -- Already current.")
13027                                         print
13028                                         print ">>>"
13029                                         print ">>> Timestamps on the server and in the local repository are the same."
13030                                         print ">>> Cancelling all further sync action. You are already up to date."
13031                                         print ">>>"
13032                                         print ">>> In order to force sync, remove '%s'." % servertimestampfile
13033                                         print ">>>"
13034                                         print
13035                                         sys.exit(0)
13036                                 elif (servertimestamp != 0) and (servertimestamp < mytimestamp):
13037                                         emergelog(xterm_titles,
13038                                                 ">>> Server out of date: %s" % dosyncuri)
13039                                         print
13040                                         print ">>>"
13041                                         print ">>> SERVER OUT OF DATE: %s" % dosyncuri
13042                                         print ">>>"
13043                                         print ">>> In order to force sync, remove '%s'." % servertimestampfile
13044                                         print ">>>"
13045                                         print
13046                                         exitcode = SERVER_OUT_OF_DATE
13047                                 elif (servertimestamp == 0) or (servertimestamp > mytimestamp):
13048                                         # actual sync
13049                                         mycommand = rsynccommand + [dosyncuri+"/", myportdir]
13050                                         exitcode = portage.process.spawn(mycommand, **spawn_kwargs)
13051                                         if exitcode in [0,1,3,4,11,14,20,21]:
13052                                                 break
13053                         elif exitcode in [1,3,4,11,14,20,21]:
13054                                 break
13055                         else:
13056                                 # Code 2 indicates protocol incompatibility, which is expected
13057                                 # for servers with protocol < 29 that don't support
13058                                 # --prune-empty-directories.  Retry for a server that supports
13059                                 # at least rsync protocol version 29 (>=rsync-2.6.4).
13060                                 pass
13061
13062                         retries=retries+1
13063
13064                         if retries<=maxretries:
13065                                 print ">>> Retrying..."
13066                                 time.sleep(11)
13067                         else:
13068                                 # over retries
13069                                 # exit loop
13070                                 updatecache_flg=False
13071                                 exitcode = EXCEEDED_MAX_RETRIES
13072                                 break
13073
13074                 if (exitcode==0):
13075                         emergelog(xterm_titles, "=== Sync completed with %s" % dosyncuri)
13076                 elif exitcode == SERVER_OUT_OF_DATE:
13077                         sys.exit(1)
13078                 elif exitcode == EXCEEDED_MAX_RETRIES:
13079                         sys.stderr.write(
13080                                 ">>> Exceeded PORTAGE_RSYNC_RETRIES: %s\n" % maxretries)
13081                         sys.exit(1)
13082                 elif (exitcode>0):
13083                         msg = []
13084                         if exitcode==1:
13085                                 msg.append("Rsync has reported that there is a syntax error. Please ensure")
13086                                 msg.append("that your SYNC statement is proper.")
13087                                 msg.append("SYNC=" + settings["SYNC"])
13088                         elif exitcode==11:
13089                                 msg.append("Rsync has reported that there is a File IO error. Normally")
13090                                 msg.append("this means your disk is full, but can be caused by corruption")
13091                                 msg.append("on the filesystem that contains PORTDIR. Please investigate")
13092                                 msg.append("and try again after the problem has been fixed.")
13093                                 msg.append("PORTDIR=" + settings["PORTDIR"])
13094                         elif exitcode==20:
13095                                 msg.append("Rsync was killed before it finished.")
13096                         else:
13097                                 msg.append("Rsync has not successfully finished. It is recommended that you keep")
13098                                 msg.append("trying or that you use the 'emerge-webrsync' option if you are unable")
13099                                 msg.append("to use rsync due to firewall or other restrictions. This should be a")
13100                                 msg.append("temporary problem unless complications exist with your network")
13101                                 msg.append("(and possibly your system's filesystem) configuration.")
13102                         for line in msg:
13103                                 out.eerror(line)
13104                         sys.exit(exitcode)
13105         elif syncuri[:6]=="cvs://":
13106                 if not os.path.exists("/usr/bin/cvs"):
13107                         print "!!! /usr/bin/cvs does not exist, so CVS support is disabled."
13108                         print "!!! Type \"emerge dev-util/cvs\" to enable CVS support."
13109                         sys.exit(1)
13110                 cvsroot=syncuri[6:]
13111                 cvsdir=os.path.dirname(myportdir)
13112                 if not os.path.exists(myportdir+"/CVS"):
13113                         #initial checkout
13114                         print ">>> Starting initial cvs checkout with "+syncuri+"..."
13115                         if os.path.exists(cvsdir+"/gentoo-x86"):
13116                                 print "!!! existing",cvsdir+"/gentoo-x86 directory; exiting."
13117                                 sys.exit(1)
13118                         try:
13119                                 os.rmdir(myportdir)
13120                         except OSError, e:
13121                                 if e.errno != errno.ENOENT:
13122                                         sys.stderr.write(
13123                                                 "!!! existing '%s' directory; exiting.\n" % myportdir)
13124                                         sys.exit(1)
13125                                 del e
13126                         if portage.spawn("cd "+cvsdir+"; cvs -z0 -d "+cvsroot+" co -P gentoo-x86",settings,free=1):
13127                                 print "!!! cvs checkout error; exiting."
13128                                 sys.exit(1)
13129                         os.rename(os.path.join(cvsdir, "gentoo-x86"), myportdir)
13130                 else:
13131                         #cvs update
13132                         print ">>> Starting cvs update with "+syncuri+"..."
13133                         retval = portage.process.spawn_bash(
13134                                 "cd %s; cvs -z0 -q update -dP" % \
13135                                 (portage._shell_quote(myportdir),), **spawn_kwargs)
13136                         if retval != os.EX_OK:
13137                                 sys.exit(retval)
13138                 dosyncuri = syncuri
13139         else:
13140                 writemsg_level("!!! Unrecognized protocol: SYNC='%s'\n" % (syncuri,),
13141                         noiselevel=-1, level=logging.ERROR)
13142                 return 1
13143
13144         if updatecache_flg and  \
13145                 myaction != "metadata" and \
13146                 "metadata-transfer" not in settings.features:
13147                 updatecache_flg = False
13148
13149         # Reload the whole config from scratch.
13150         settings, trees, mtimedb = load_emerge_config(trees=trees)
13151         root_config = trees[settings["ROOT"]]["root_config"]
13152         portdb = trees[settings["ROOT"]]["porttree"].dbapi
13153
13154         if os.path.exists(myportdir+"/metadata/cache") and updatecache_flg:
13155                 action_metadata(settings, portdb, myopts)
13156
13157         if portage._global_updates(trees, mtimedb["updates"]):
13158                 mtimedb.commit()
13159                 # Reload the whole config from scratch.
13160                 settings, trees, mtimedb = load_emerge_config(trees=trees)
13161                 portdb = trees[settings["ROOT"]]["porttree"].dbapi
13162                 root_config = trees[settings["ROOT"]]["root_config"]
13163
13164         mybestpv = portdb.xmatch("bestmatch-visible",
13165                 portage.const.PORTAGE_PACKAGE_ATOM)
13166         mypvs = portage.best(
13167                 trees[settings["ROOT"]]["vartree"].dbapi.match(
13168                 portage.const.PORTAGE_PACKAGE_ATOM))
13169
13170         chk_updated_cfg_files("/", settings.get("CONFIG_PROTECT","").split())
13171
13172         if myaction != "metadata":
13173                 if os.access(portage.USER_CONFIG_PATH + "/bin/post_sync", os.X_OK):
13174                         retval = portage.process.spawn(
13175                                 [os.path.join(portage.USER_CONFIG_PATH, "bin", "post_sync"),
13176                                 dosyncuri], env=settings.environ())
13177                         if retval != os.EX_OK:
13178                                 print red(" * ")+bold("spawn failed of "+ portage.USER_CONFIG_PATH + "/bin/post_sync")
13179
13180         if(mybestpv != mypvs) and not "--quiet" in myopts:
13181                 print
13182                 print red(" * ")+bold("An update to portage is available.")+" It is _highly_ recommended"
13183                 print red(" * ")+"that you update portage now, before any other packages are updated."
13184                 print
13185                 print red(" * ")+"To update portage, run 'emerge portage' now."
13186                 print
13187         
13188         display_news_notification(root_config, myopts)
13189         return os.EX_OK
13190
13191 def git_sync_timestamps(settings, portdir):
13192         """
13193         Since git doesn't preserve timestamps, synchronize timestamps between
13194         entries and ebuilds/eclasses. Assume the cache has the correct timestamp
13195         for a given file as long as the file in the working tree is not modified
13196         (relative to HEAD).
13197         """
13198         cache_dir = os.path.join(portdir, "metadata", "cache")
13199         if not os.path.isdir(cache_dir):
13200                 return os.EX_OK
13201         writemsg_level(">>> Synchronizing timestamps...\n")
13202
13203         from portage.cache.cache_errors import CacheError
13204         try:
13205                 cache_db = settings.load_best_module("portdbapi.metadbmodule")(
13206                         portdir, "metadata/cache", portage.auxdbkeys[:], readonly=True)
13207         except CacheError, e:
13208                 writemsg_level("!!! Unable to instantiate cache: %s\n" % (e,),
13209                         level=logging.ERROR, noiselevel=-1)
13210                 return 1
13211
13212         ec_dir = os.path.join(portdir, "eclass")
13213         try:
13214                 ec_names = set(f[:-7] for f in os.listdir(ec_dir) \
13215                         if f.endswith(".eclass"))
13216         except OSError, e:
13217                 writemsg_level("!!! Unable to list eclasses: %s\n" % (e,),
13218                         level=logging.ERROR, noiselevel=-1)
13219                 return 1
13220
13221         args = [portage.const.BASH_BINARY, "-c",
13222                 "cd %s && git diff-index --name-only --diff-filter=M HEAD" % \
13223                 portage._shell_quote(portdir)]
13224         import subprocess
13225         proc = subprocess.Popen(args, stdout=subprocess.PIPE)
13226         modified_files = set(l.rstrip("\n") for l in proc.stdout)
13227         rval = proc.wait()
13228         if rval != os.EX_OK:
13229                 return rval
13230
13231         modified_eclasses = set(ec for ec in ec_names \
13232                 if os.path.join("eclass", ec + ".eclass") in modified_files)
13233
13234         updated_ec_mtimes = {}
13235
13236         for cpv in cache_db:
13237                 cpv_split = portage.catpkgsplit(cpv)
13238                 if cpv_split is None:
13239                         writemsg_level("!!! Invalid cache entry: %s\n" % (cpv,),
13240                                 level=logging.ERROR, noiselevel=-1)
13241                         continue
13242
13243                 cat, pn, ver, rev = cpv_split
13244                 cat, pf = portage.catsplit(cpv)
13245                 relative_eb_path = os.path.join(cat, pn, pf + ".ebuild")
13246                 if relative_eb_path in modified_files:
13247                         continue
13248
13249                 try:
13250                         cache_entry = cache_db[cpv]
13251                         eb_mtime = cache_entry.get("_mtime_")
13252                         ec_mtimes = cache_entry.get("_eclasses_")
13253                 except KeyError:
13254                         writemsg_level("!!! Missing cache entry: %s\n" % (cpv,),
13255                                 level=logging.ERROR, noiselevel=-1)
13256                         continue
13257                 except CacheError, e:
13258                         writemsg_level("!!! Unable to access cache entry: %s %s\n" % \
13259                                 (cpv, e), level=logging.ERROR, noiselevel=-1)
13260                         continue
13261
13262                 if eb_mtime is None:
13263                         writemsg_level("!!! Missing ebuild mtime: %s\n" % (cpv,),
13264                                 level=logging.ERROR, noiselevel=-1)
13265                         continue
13266
13267                 try:
13268                         eb_mtime = long(eb_mtime)
13269                 except ValueError:
13270                         writemsg_level("!!! Invalid ebuild mtime: %s %s\n" % \
13271                                 (cpv, eb_mtime), level=logging.ERROR, noiselevel=-1)
13272                         continue
13273
13274                 if ec_mtimes is None:
13275                         writemsg_level("!!! Missing eclass mtimes: %s\n" % (cpv,),
13276                                 level=logging.ERROR, noiselevel=-1)
13277                         continue
13278
13279                 if modified_eclasses.intersection(ec_mtimes):
13280                         continue
13281
13282                 missing_eclasses = set(ec_mtimes).difference(ec_names)
13283                 if missing_eclasses:
13284                         writemsg_level("!!! Non-existent eclass(es): %s %s\n" % \
13285                                 (cpv, sorted(missing_eclasses)), level=logging.ERROR,
13286                                 noiselevel=-1)
13287                         continue
13288
13289                 eb_path = os.path.join(portdir, relative_eb_path)
13290                 try:
13291                         current_eb_mtime = os.stat(eb_path)
13292                 except OSError:
13293                         writemsg_level("!!! Missing ebuild: %s\n" % \
13294                                 (cpv,), level=logging.ERROR, noiselevel=-1)
13295                         continue
13296
13297                 inconsistent = False
13298                 for ec, (ec_path, ec_mtime) in ec_mtimes.iteritems():
13299                         updated_mtime = updated_ec_mtimes.get(ec)
13300                         if updated_mtime is not None and updated_mtime != ec_mtime:
13301                                 writemsg_level("!!! Inconsistent eclass mtime: %s %s\n" % \
13302                                         (cpv, ec), level=logging.ERROR, noiselevel=-1)
13303                                 inconsistent = True
13304                                 break
13305
13306                 if inconsistent:
13307                         continue
13308
13309                 if current_eb_mtime != eb_mtime:
13310                         os.utime(eb_path, (eb_mtime, eb_mtime))
13311
13312                 for ec, (ec_path, ec_mtime) in ec_mtimes.iteritems():
13313                         if ec in updated_ec_mtimes:
13314                                 continue
13315                         ec_path = os.path.join(ec_dir, ec + ".eclass")
13316                         current_mtime = long(os.stat(ec_path).st_mtime)
13317                         if current_mtime != ec_mtime:
13318                                 os.utime(ec_path, (ec_mtime, ec_mtime))
13319                         updated_ec_mtimes[ec] = ec_mtime
13320
13321         return os.EX_OK
13322
13323 def action_metadata(settings, portdb, myopts):
13324         portage.writemsg_stdout("\n>>> Updating Portage cache:      ")
13325         old_umask = os.umask(0002)
13326         cachedir = os.path.normpath(settings.depcachedir)
13327         if cachedir in ["/",    "/bin", "/dev",  "/etc",  "/home",
13328                                         "/lib", "/opt", "/proc", "/root", "/sbin",
13329                                         "/sys", "/tmp", "/usr",  "/var"]:
13330                 print >> sys.stderr, "!!! PORTAGE_DEPCACHEDIR IS SET TO A PRIMARY " + \
13331                         "ROOT DIRECTORY ON YOUR SYSTEM."
13332                 print >> sys.stderr, \
13333                         "!!! This is ALMOST CERTAINLY NOT what you want: '%s'" % cachedir
13334                 sys.exit(73)
13335         if not os.path.exists(cachedir):
13336                 os.mkdir(cachedir)
13337
13338         ec = portage.eclass_cache.cache(portdb.porttree_root)
13339         myportdir = os.path.realpath(settings["PORTDIR"])
13340         cm = settings.load_best_module("portdbapi.metadbmodule")(
13341                 myportdir, "metadata/cache", portage.auxdbkeys[:], readonly=True)
13342
13343         from portage.cache import util
13344
13345         class percentage_noise_maker(util.quiet_mirroring):
13346                 def __init__(self, dbapi):
13347                         self.dbapi = dbapi
13348                         self.cp_all = dbapi.cp_all()
13349                         l = len(self.cp_all)
13350                         self.call_update_min = 100000000
13351                         self.min_cp_all = l/100.0
13352                         self.count = 1
13353                         self.pstr = ''
13354
13355                 def __iter__(self):
13356                         for x in self.cp_all:
13357                                 self.count += 1
13358                                 if self.count > self.min_cp_all:
13359                                         self.call_update_min = 0
13360                                         self.count = 0
13361                                 for y in self.dbapi.cp_list(x):
13362                                         yield y
13363                         self.call_update_mine = 0
13364
13365                 def update(self, *arg):
13366                         try:
13367                                 self.pstr = int(self.pstr) + 1
13368                         except ValueError:
13369                                 self.pstr = 1
13370                         sys.stdout.write("%s%i%%" % \
13371                                 ("\b" * (len(str(self.pstr))+1), self.pstr))
13372                         sys.stdout.flush()
13373                         self.call_update_min = 10000000
13374
13375                 def finish(self, *arg):
13376                         sys.stdout.write("\b\b\b\b100%\n")
13377                         sys.stdout.flush()
13378
13379         if "--quiet" in myopts:
13380                 def quicky_cpv_generator(cp_all_list):
13381                         for x in cp_all_list:
13382                                 for y in portdb.cp_list(x):
13383                                         yield y
13384                 source = quicky_cpv_generator(portdb.cp_all())
13385                 noise_maker = portage.cache.util.quiet_mirroring()
13386         else:
13387                 noise_maker = source = percentage_noise_maker(portdb)
13388         portage.cache.util.mirror_cache(source, cm, portdb.auxdb[myportdir],
13389                 eclass_cache=ec, verbose_instance=noise_maker)
13390
13391         sys.stdout.flush()
13392         os.umask(old_umask)
13393
13394 def action_regen(settings, portdb, max_jobs, max_load):
13395         xterm_titles = "notitles" not in settings.features
13396         emergelog(xterm_titles, " === regen")
13397         #regenerate cache entries
13398         portage.writemsg_stdout("Regenerating cache entries...\n")
13399         try:
13400                 os.close(sys.stdin.fileno())
13401         except SystemExit, e:
13402                 raise # Needed else can't exit
13403         except:
13404                 pass
13405         sys.stdout.flush()
13406
13407         regen = MetadataRegen(portdb, max_jobs=max_jobs, max_load=max_load)
13408         regen.run()
13409
13410         portage.writemsg_stdout("done!\n")
13411         return regen.returncode
13412
13413 def action_config(settings, trees, myopts, myfiles):
13414         if len(myfiles) != 1:
13415                 print red("!!! config can only take a single package atom at this time\n")
13416                 sys.exit(1)
13417         if not is_valid_package_atom(myfiles[0]):
13418                 portage.writemsg("!!! '%s' is not a valid package atom.\n" % myfiles[0],
13419                         noiselevel=-1)
13420                 portage.writemsg("!!! Please check ebuild(5) for full details.\n")
13421                 portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
13422                 sys.exit(1)
13423         print
13424         try:
13425                 pkgs = trees[settings["ROOT"]]["vartree"].dbapi.match(myfiles[0])
13426         except portage.exception.AmbiguousPackageName, e:
13427                 # Multiple matches thrown from cpv_expand
13428                 pkgs = e.args[0]
13429         if len(pkgs) == 0:
13430                 print "No packages found.\n"
13431                 sys.exit(0)
13432         elif len(pkgs) > 1:
13433                 if "--ask" in myopts:
13434                         options = []
13435                         print "Please select a package to configure:"
13436                         idx = 0
13437                         for pkg in pkgs:
13438                                 idx += 1
13439                                 options.append(str(idx))
13440                                 print options[-1]+") "+pkg
13441                         print "X) Cancel"
13442                         options.append("X")
13443                         idx = userquery("Selection?", options)
13444                         if idx == "X":
13445                                 sys.exit(0)
13446                         pkg = pkgs[int(idx)-1]
13447                 else:
13448                         print "The following packages available:"
13449                         for pkg in pkgs:
13450                                 print "* "+pkg
13451                         print "\nPlease use a specific atom or the --ask option."
13452                         sys.exit(1)
13453         else:
13454                 pkg = pkgs[0]
13455
13456         print
13457         if "--ask" in myopts:
13458                 if userquery("Ready to configure "+pkg+"?") == "No":
13459                         sys.exit(0)
13460         else:
13461                 print "Configuring pkg..."
13462         print
13463         ebuildpath = trees[settings["ROOT"]]["vartree"].dbapi.findname(pkg)
13464         mysettings = portage.config(clone=settings)
13465         vardb = trees[mysettings["ROOT"]]["vartree"].dbapi
13466         debug = mysettings.get("PORTAGE_DEBUG") == "1"
13467         retval = portage.doebuild(ebuildpath, "config", mysettings["ROOT"],
13468                 mysettings,
13469                 debug=(settings.get("PORTAGE_DEBUG", "") == 1), cleanup=True,
13470                 mydbapi=trees[settings["ROOT"]]["vartree"].dbapi, tree="vartree")
13471         if retval == os.EX_OK:
13472                 portage.doebuild(ebuildpath, "clean", mysettings["ROOT"],
13473                         mysettings, debug=debug, mydbapi=vardb, tree="vartree")
13474         print
13475
13476 def action_info(settings, trees, myopts, myfiles):
13477         print getportageversion(settings["PORTDIR"], settings["ROOT"],
13478                 settings.profile_path, settings["CHOST"],
13479                 trees[settings["ROOT"]]["vartree"].dbapi)
13480         header_width = 65
13481         header_title = "System Settings"
13482         if myfiles:
13483                 print header_width * "="
13484                 print header_title.rjust(int(header_width/2 + len(header_title)/2))
13485         print header_width * "="
13486         print "System uname: "+platform.platform(aliased=1)
13487
13488         lastSync = portage.grabfile(os.path.join(
13489                 settings["PORTDIR"], "metadata", "timestamp.chk"))
13490         print "Timestamp of tree:",
13491         if lastSync:
13492                 print lastSync[0]
13493         else:
13494                 print "Unknown"
13495
13496         output=commands.getstatusoutput("distcc --version")
13497         if not output[0]:
13498                 print str(output[1].split("\n",1)[0]),
13499                 if "distcc" in settings.features:
13500                         print "[enabled]"
13501                 else:
13502                         print "[disabled]"
13503
13504         output=commands.getstatusoutput("ccache -V")
13505         if not output[0]:
13506                 print str(output[1].split("\n",1)[0]),
13507                 if "ccache" in settings.features:
13508                         print "[enabled]"
13509                 else:
13510                         print "[disabled]"
13511
13512         myvars  = ["sys-devel/autoconf", "sys-devel/automake", "virtual/os-headers",
13513                    "sys-devel/binutils", "sys-devel/libtool",  "dev-lang/python"]
13514         myvars += portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_pkgs")
13515         myvars  = portage.util.unique_array(myvars)
13516         myvars.sort()
13517
13518         for x in myvars:
13519                 if portage.isvalidatom(x):
13520                         pkg_matches = trees["/"]["vartree"].dbapi.match(x)
13521                         pkg_matches = [portage.catpkgsplit(cpv)[1:] for cpv in pkg_matches]
13522                         pkg_matches.sort(key=cmp_sort_key(portage.pkgcmp))
13523                         pkgs = []
13524                         for pn, ver, rev in pkg_matches:
13525                                 if rev != "r0":
13526                                         pkgs.append(ver + "-" + rev)
13527                                 else:
13528                                         pkgs.append(ver)
13529                         if pkgs:
13530                                 pkgs = ", ".join(pkgs)
13531                                 print "%-20s %s" % (x+":", pkgs)
13532                 else:
13533                         print "%-20s %s" % (x+":", "[NOT VALID]")
13534
13535         libtool_vers = ",".join(trees["/"]["vartree"].dbapi.match("sys-devel/libtool"))
13536
13537         if "--verbose" in myopts:
13538                 myvars=settings.keys()
13539         else:
13540                 myvars = ['GENTOO_MIRRORS', 'CONFIG_PROTECT', 'CONFIG_PROTECT_MASK',
13541                           'PORTDIR', 'DISTDIR', 'PKGDIR', 'PORTAGE_TMPDIR',
13542                           'PORTDIR_OVERLAY', 'USE', 'CHOST', 'CFLAGS', 'CXXFLAGS',
13543                           'ACCEPT_KEYWORDS', 'SYNC', 'FEATURES', 'EMERGE_DEFAULT_OPTS']
13544
13545                 myvars.extend(portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_vars"))
13546
13547         myvars = portage.util.unique_array(myvars)
13548         use_expand = settings.get('USE_EXPAND', '').split()
13549         use_expand.sort()
13550         use_expand_hidden = set(
13551                 settings.get('USE_EXPAND_HIDDEN', '').upper().split())
13552         alphabetical_use = '--alphabetical' in myopts
13553         root_config = trees[settings["ROOT"]]['root_config']
13554         unset_vars = []
13555         myvars.sort()
13556         for x in myvars:
13557                 if x in settings:
13558                         if x != "USE":
13559                                 print '%s="%s"' % (x, settings[x])
13560                         else:
13561                                 use = set(settings["USE"].split())
13562                                 for varname in use_expand:
13563                                         flag_prefix = varname.lower() + "_"
13564                                         for f in list(use):
13565                                                 if f.startswith(flag_prefix):
13566                                                         use.remove(f)
13567                                 use = list(use)
13568                                 use.sort()
13569                                 print 'USE="%s"' % " ".join(use),
13570                                 for varname in use_expand:
13571                                         myval = settings.get(varname)
13572                                         if myval:
13573                                                 print '%s="%s"' % (varname, myval),
13574                                 print
13575                 else:
13576                         unset_vars.append(x)
13577         if unset_vars:
13578                 print "Unset:  "+", ".join(unset_vars)
13579         print
13580
13581         if "--debug" in myopts:
13582                 for x in dir(portage):
13583                         module = getattr(portage, x)
13584                         if "cvs_id_string" in dir(module):
13585                                 print "%s: %s" % (str(x), str(module.cvs_id_string))
13586
13587         # See if we can find any packages installed matching the strings
13588         # passed on the command line
13589         mypkgs = []
13590         vardb = trees[settings["ROOT"]]["vartree"].dbapi
13591         portdb = trees[settings["ROOT"]]["porttree"].dbapi
13592         for x in myfiles:
13593                 mypkgs.extend(vardb.match(x))
13594
13595         # If some packages were found...
13596         if mypkgs:
13597                 # Get our global settings (we only print stuff if it varies from
13598                 # the current config)
13599                 mydesiredvars = [ 'CHOST', 'CFLAGS', 'CXXFLAGS', 'LDFLAGS' ]
13600                 auxkeys = mydesiredvars + list(vardb._aux_cache_keys)
13601                 global_vals = {}
13602                 pkgsettings = portage.config(clone=settings)
13603
13604                 for myvar in mydesiredvars:
13605                         global_vals[myvar] = set(settings.get(myvar, "").split())
13606
13607                 # Loop through each package
13608                 # Only print settings if they differ from global settings
13609                 header_title = "Package Settings"
13610                 print header_width * "="
13611                 print header_title.rjust(int(header_width/2 + len(header_title)/2))
13612                 print header_width * "="
13613                 from portage.output import EOutput
13614                 out = EOutput()
13615                 for cpv in mypkgs:
13616                         # Get all package specific variables
13617                         metadata = dict(izip(auxkeys, vardb.aux_get(cpv, auxkeys)))
13618                         pkg = Package(built=True, cpv=cpv,
13619                                 installed=True, metadata=izip(Package.metadata_keys,
13620                                 (metadata.get(x, '') for x in Package.metadata_keys)),
13621                                 root_config=root_config, type_name='installed')
13622                         valuesmap = {}
13623                         for k in auxkeys:
13624                                 valuesmap[k] = set(metadata[k].split())
13625
13626                         diff_values = {}
13627                         for myvar in mydesiredvars:
13628                                 # If the package variable doesn't match the
13629                                 # current global variable, something has changed
13630                                 # so set diff_found so we know to print
13631                                 if valuesmap[myvar] != global_vals[myvar]:
13632                                         diff_values[myvar] = valuesmap[myvar]
13633
13634                         # If a difference was found, print the info for
13635                         # this package.
13636                         if diff_values:
13637                                 # Print package info
13638                                 print "%s was built with the following:" % pkg.cpv
13639                                 for myvar in mydesiredvars:
13640                                         if myvar in diff_values:
13641                                                 mylist = list(diff_values[myvar])
13642                                                 mylist.sort()
13643                                                 print "%s=\"%s\"" % (myvar, " ".join(mylist))
13644
13645                         pkgsettings.setcpv(pkg)
13646                         forced_flags = set(chain(pkgsettings.useforce,
13647                                 pkgsettings.usemask))
13648                         use = set(pkg.use.enabled)
13649                         use.discard(pkgsettings.get('ARCH'))
13650                         use_expand_flags = set()
13651                         use_enabled = {}
13652                         use_disabled = {}
13653                         for varname in use_expand:
13654                                 flag_prefix = varname.lower() + "_"
13655                                 for f in use:
13656                                         if f.startswith(flag_prefix):
13657                                                 use_expand_flags.add(f)
13658                                                 use_enabled.setdefault(
13659                                                         varname.upper(), []).append(f[len(flag_prefix):])
13660
13661                                 for f in pkg.iuse.all:
13662                                         if f.startswith(flag_prefix):
13663                                                 use_expand_flags.add(f)
13664                                                 if f not in use:
13665                                                         use_disabled.setdefault(
13666                                                                 varname.upper(), []).append(f[len(flag_prefix):])
13667
13668                         var_order = set(use_enabled)
13669                         var_order.update(use_disabled)
13670                         var_order = sorted(var_order)
13671                         var_order.insert(0, 'USE')
13672                         use.difference_update(use_expand_flags)
13673                         use_enabled['USE'] = list(use)
13674                         use_disabled['USE'] = []
13675
13676                         for f in pkg.iuse.all:
13677                                 if f not in use and \
13678                                         f not in use_expand_flags:
13679                                         use_disabled['USE'].append(f)
13680
13681                         for varname in var_order:
13682                                 if varname in use_expand_hidden:
13683                                         continue
13684                                 flags = []
13685                                 for f in use_enabled.get(varname, []):
13686                                         flags.append(UseFlagDisplay(f, True, f in forced_flags))
13687                                 for f in use_disabled.get(varname, []):
13688                                         flags.append(UseFlagDisplay(f, False, f in forced_flags))
13689                                 if alphabetical_use:
13690                                         flags.sort(key=cmp_sort_key(UseFlagDisplay.cmp_combined))
13691                                 else:
13692                                         flags.sort(key=cmp_sort_key(UseFlagDisplay.cmp_separated))
13693                                 print '%s="%s"' % (varname, ' '.join(str(f) for f in flags)),
13694                         print
13695
13696                         print ">>> Attempting to run pkg_info() for '%s'" % pkg.cpv
13697                         ebuildpath = vardb.findname(pkg.cpv)
13698                         if not ebuildpath or not os.path.exists(ebuildpath):
13699                                 out.ewarn("No ebuild found for '%s'" % pkg.cpv)
13700                                 continue
13701                         portage.doebuild(ebuildpath, "info", pkgsettings["ROOT"],
13702                                 pkgsettings, debug=(settings.get("PORTAGE_DEBUG", "") == 1),
13703                                 mydbapi=trees[settings["ROOT"]]["vartree"].dbapi,
13704                                 tree="vartree")
13705
13706 def action_search(root_config, myopts, myfiles, spinner):
13707         if not myfiles:
13708                 print "emerge: no search terms provided."
13709         else:
13710                 searchinstance = search(root_config,
13711                         spinner, "--searchdesc" in myopts,
13712                         "--quiet" not in myopts, "--usepkg" in myopts,
13713                         "--usepkgonly" in myopts)
13714                 for mysearch in myfiles:
13715                         try:
13716                                 searchinstance.execute(mysearch)
13717                         except re.error, comment:
13718                                 print "\n!!! Regular expression error in \"%s\": %s" % ( mysearch, comment )
13719                                 sys.exit(1)
13720                         searchinstance.output()
13721
13722 def action_depclean(settings, trees, ldpath_mtimes,
13723         myopts, action, myfiles, spinner):
13724         # Kill packages that aren't explicitly merged or are required as a
13725         # dependency of another package. World file is explicit.
13726
13727         # Global depclean or prune operations are not very safe when there are
13728         # missing dependencies since it's unknown how badly incomplete
13729         # the dependency graph is, and we might accidentally remove packages
13730         # that should have been pulled into the graph. On the other hand, it's
13731         # relatively safe to ignore missing deps when only asked to remove
13732         # specific packages.
13733         allow_missing_deps = len(myfiles) > 0
13734
13735         msg = []
13736         msg.append("Always study the list of packages to be cleaned for any obvious\n")
13737         msg.append("mistakes. Packages that are part of the world set will always\n")
13738         msg.append("be kept.  They can be manually added to this set with\n")
13739         msg.append(good("`emerge --noreplace <atom>`") + ".  Packages that are listed in\n")
13740         msg.append("package.provided (see portage(5)) will be removed by\n")
13741         msg.append("depclean, even if they are part of the world set.\n")
13742         msg.append("\n")
13743         msg.append("As a safety measure, depclean will not remove any packages\n")
13744         msg.append("unless *all* required dependencies have been resolved.  As a\n")
13745         msg.append("consequence, it is often necessary to run %s\n" % \
13746                 good("`emerge --update"))
13747         msg.append(good("--newuse --deep @system @world`") + \
13748                 " prior to depclean.\n")
13749
13750         if action == "depclean" and "--quiet" not in myopts and not myfiles:
13751                 portage.writemsg_stdout("\n")
13752                 for x in msg:
13753                         portage.writemsg_stdout(colorize("WARN", " * ") + x)
13754
13755         xterm_titles = "notitles" not in settings.features
13756         myroot = settings["ROOT"]
13757         root_config = trees[myroot]["root_config"]
13758         getSetAtoms = root_config.setconfig.getSetAtoms
13759         vardb = trees[myroot]["vartree"].dbapi
13760
13761         required_set_names = ("system", "world")
13762         required_sets = {}
13763         set_args = []
13764
13765         for s in required_set_names:
13766                 required_sets[s] = InternalPackageSet(
13767                         initial_atoms=getSetAtoms(s))
13768
13769         
13770         # When removing packages, use a temporary version of world
13771         # which excludes packages that are intended to be eligible for
13772         # removal.
13773         world_temp_set = required_sets["world"]
13774         system_set = required_sets["system"]
13775
13776         if not system_set or not world_temp_set:
13777
13778                 if not system_set:
13779                         writemsg_level("!!! You have no system list.\n",
13780                                 level=logging.ERROR, noiselevel=-1)
13781
13782                 if not world_temp_set:
13783                         writemsg_level("!!! You have no world file.\n",
13784                                         level=logging.WARNING, noiselevel=-1)
13785
13786                 writemsg_level("!!! Proceeding is likely to " + \
13787                         "break your installation.\n",
13788                         level=logging.WARNING, noiselevel=-1)
13789                 if "--pretend" not in myopts:
13790                         countdown(int(settings["EMERGE_WARNING_DELAY"]), ">>> Depclean")
13791
13792         if action == "depclean":
13793                 emergelog(xterm_titles, " >>> depclean")
13794
13795         import textwrap
13796         args_set = InternalPackageSet()
13797         if myfiles:
13798                 for x in myfiles:
13799                         if not is_valid_package_atom(x):
13800                                 writemsg_level("!!! '%s' is not a valid package atom.\n" % x,
13801                                         level=logging.ERROR, noiselevel=-1)
13802                                 writemsg_level("!!! Please check ebuild(5) for full details.\n")
13803                                 return
13804                         try:
13805                                 atom = portage.dep_expand(x, mydb=vardb, settings=settings)
13806                         except portage.exception.AmbiguousPackageName, e:
13807                                 msg = "The short ebuild name \"" + x + \
13808                                         "\" is ambiguous.  Please specify " + \
13809                                         "one of the following " + \
13810                                         "fully-qualified ebuild names instead:"
13811                                 for line in textwrap.wrap(msg, 70):
13812                                         writemsg_level("!!! %s\n" % (line,),
13813                                                 level=logging.ERROR, noiselevel=-1)
13814                                 for i in e[0]:
13815                                         writemsg_level("    %s\n" % colorize("INFORM", i),
13816                                                 level=logging.ERROR, noiselevel=-1)
13817                                 writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
13818                                 return
13819                         args_set.add(atom)
13820                 matched_packages = False
13821                 for x in args_set:
13822                         if vardb.match(x):
13823                                 matched_packages = True
13824                                 break
13825                 if not matched_packages:
13826                         writemsg_level(">>> No packages selected for removal by %s\n" % \
13827                                 action)
13828                         return
13829
13830         writemsg_level("\nCalculating dependencies  ")
13831         resolver_params = create_depgraph_params(myopts, "remove")
13832         resolver = depgraph(settings, trees, myopts, resolver_params, spinner)
13833         vardb = resolver.trees[myroot]["vartree"].dbapi
13834
13835         if action == "depclean":
13836
13837                 if args_set:
13838                         # Pull in everything that's installed but not matched
13839                         # by an argument atom since we don't want to clean any
13840                         # package if something depends on it.
13841
13842                         world_temp_set.clear()
13843                         for pkg in vardb:
13844                                 spinner.update()
13845
13846                                 try:
13847                                         if args_set.findAtomForPackage(pkg) is None:
13848                                                 world_temp_set.add("=" + pkg.cpv)
13849                                                 continue
13850                                 except portage.exception.InvalidDependString, e:
13851                                         show_invalid_depstring_notice(pkg,
13852                                                 pkg.metadata["PROVIDE"], str(e))
13853                                         del e
13854                                         world_temp_set.add("=" + pkg.cpv)
13855                                         continue
13856
13857         elif action == "prune":
13858
13859                 # Pull in everything that's installed since we don't
13860                 # to prune a package if something depends on it.
13861                 world_temp_set.clear()
13862                 world_temp_set.update(vardb.cp_all())
13863
13864                 if not args_set:
13865
13866                         # Try to prune everything that's slotted.
13867                         for cp in vardb.cp_all():
13868                                 if len(vardb.cp_list(cp)) > 1:
13869                                         args_set.add(cp)
13870
13871                 # Remove atoms from world that match installed packages
13872                 # that are also matched by argument atoms, but do not remove
13873                 # them if they match the highest installed version.
13874                 for pkg in vardb:
13875                         spinner.update()
13876                         pkgs_for_cp = vardb.match_pkgs(pkg.cp)
13877                         if not pkgs_for_cp or pkg not in pkgs_for_cp:
13878                                 raise AssertionError("package expected in matches: " + \
13879                                         "cp = %s, cpv = %s matches = %s" % \
13880                                         (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
13881
13882                         highest_version = pkgs_for_cp[-1]
13883                         if pkg == highest_version:
13884                                 # pkg is the highest version
13885                                 world_temp_set.add("=" + pkg.cpv)
13886                                 continue
13887
13888                         if len(pkgs_for_cp) <= 1:
13889                                 raise AssertionError("more packages expected: " + \
13890                                         "cp = %s, cpv = %s matches = %s" % \
13891                                         (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
13892
13893                         try:
13894                                 if args_set.findAtomForPackage(pkg) is None:
13895                                         world_temp_set.add("=" + pkg.cpv)
13896                                         continue
13897                         except portage.exception.InvalidDependString, e:
13898                                 show_invalid_depstring_notice(pkg,
13899                                         pkg.metadata["PROVIDE"], str(e))
13900                                 del e
13901                                 world_temp_set.add("=" + pkg.cpv)
13902                                 continue
13903
13904         set_args = {}
13905         for s, package_set in required_sets.iteritems():
13906                 set_atom = SETPREFIX + s
13907                 set_arg = SetArg(arg=set_atom, set=package_set,
13908                         root_config=resolver.roots[myroot])
13909                 set_args[s] = set_arg
13910                 for atom in set_arg.set:
13911                         resolver._dep_stack.append(
13912                                 Dependency(atom=atom, root=myroot, parent=set_arg))
13913                         resolver.digraph.add(set_arg, None)
13914
13915         success = resolver._complete_graph()
13916         writemsg_level("\b\b... done!\n")
13917
13918         resolver.display_problems()
13919
13920         if not success:
13921                 return 1
13922
13923         def unresolved_deps():
13924
13925                 unresolvable = set()
13926                 for dep in resolver._initially_unsatisfied_deps:
13927                         if isinstance(dep.parent, Package) and \
13928                                 (dep.priority > UnmergeDepPriority.SOFT):
13929                                 unresolvable.add((dep.atom, dep.parent.cpv))
13930
13931                 if not unresolvable:
13932                         return False
13933
13934                 if unresolvable and not allow_missing_deps:
13935                         prefix = bad(" * ")
13936                         msg = []
13937                         msg.append("Dependencies could not be completely resolved due to")
13938                         msg.append("the following required packages not being installed:")
13939                         msg.append("")
13940                         for atom, parent in unresolvable:
13941                                 msg.append("  %s pulled in by:" % (atom,))
13942                                 msg.append("    %s" % (parent,))
13943                                 msg.append("")
13944                         msg.append("Have you forgotten to run " + \
13945                                 good("`emerge --update --newuse --deep @system @world`") + " prior")
13946                         msg.append(("to %s? It may be necessary to manually " + \
13947                                 "uninstall packages that no longer") % action)
13948                         msg.append("exist in the portage tree since " + \
13949                                 "it may not be possible to satisfy their")
13950                         msg.append("dependencies.  Also, be aware of " + \
13951                                 "the --with-bdeps option that is documented")
13952                         msg.append("in " + good("`man emerge`") + ".")
13953                         if action == "prune":
13954                                 msg.append("")
13955                                 msg.append("If you would like to ignore " + \
13956                                         "dependencies then use %s." % good("--nodeps"))
13957                         writemsg_level("".join("%s%s\n" % (prefix, line) for line in msg),
13958                                 level=logging.ERROR, noiselevel=-1)
13959                         return True
13960                 return False
13961
13962         if unresolved_deps():
13963                 return 1
13964
13965         graph = resolver.digraph.copy()
13966         required_pkgs_total = 0
13967         for node in graph:
13968                 if isinstance(node, Package):
13969                         required_pkgs_total += 1
13970
13971         def show_parents(child_node):
13972                 parent_nodes = graph.parent_nodes(child_node)
13973                 if not parent_nodes:
13974                         # With --prune, the highest version can be pulled in without any
13975                         # real parent since all installed packages are pulled in.  In that
13976                         # case there's nothing to show here.
13977                         return
13978                 parent_strs = []
13979                 for node in parent_nodes:
13980                         parent_strs.append(str(getattr(node, "cpv", node)))
13981                 parent_strs.sort()
13982                 msg = []
13983                 msg.append("  %s pulled in by:\n" % (child_node.cpv,))
13984                 for parent_str in parent_strs:
13985                         msg.append("    %s\n" % (parent_str,))
13986                 msg.append("\n")
13987                 portage.writemsg_stdout("".join(msg), noiselevel=-1)
13988
13989         def cmp_pkg_cpv(pkg1, pkg2):
13990                 """Sort Package instances by cpv."""
13991                 if pkg1.cpv > pkg2.cpv:
13992                         return 1
13993                 elif pkg1.cpv == pkg2.cpv:
13994                         return 0
13995                 else:
13996                         return -1
13997
13998         def create_cleanlist():
13999                 pkgs_to_remove = []
14000
14001                 if action == "depclean":
14002                         if args_set:
14003
14004                                 for pkg in sorted(vardb, key=cmp_sort_key(cmp_pkg_cpv)):
14005                                         arg_atom = None
14006                                         try:
14007                                                 arg_atom = args_set.findAtomForPackage(pkg)
14008                                         except portage.exception.InvalidDependString:
14009                                                 # this error has already been displayed by now
14010                                                 continue
14011
14012                                         if arg_atom:
14013                                                 if pkg not in graph:
14014                                                         pkgs_to_remove.append(pkg)
14015                                                 elif "--verbose" in myopts:
14016                                                         show_parents(pkg)
14017
14018                         else:
14019                                 for pkg in sorted(vardb, key=cmp_sort_key(cmp_pkg_cpv)):
14020                                         if pkg not in graph:
14021                                                 pkgs_to_remove.append(pkg)
14022                                         elif "--verbose" in myopts:
14023                                                 show_parents(pkg)
14024
14025                 elif action == "prune":
14026                         # Prune really uses all installed instead of world. It's not
14027                         # a real reverse dependency so don't display it as such.
14028                         graph.remove(set_args["world"])
14029
14030                         for atom in args_set:
14031                                 for pkg in vardb.match_pkgs(atom):
14032                                         if pkg not in graph:
14033                                                 pkgs_to_remove.append(pkg)
14034                                         elif "--verbose" in myopts:
14035                                                 show_parents(pkg)
14036
14037                 if not pkgs_to_remove:
14038                         writemsg_level(
14039                                 ">>> No packages selected for removal by %s\n" % action)
14040                         if "--verbose" not in myopts:
14041                                 writemsg_level(
14042                                         ">>> To see reverse dependencies, use %s\n" % \
14043                                                 good("--verbose"))
14044                         if action == "prune":
14045                                 writemsg_level(
14046                                         ">>> To ignore dependencies, use %s\n" % \
14047                                                 good("--nodeps"))
14048
14049                 return pkgs_to_remove
14050
14051         cleanlist = create_cleanlist()
14052
14053         if len(cleanlist):
14054                 clean_set = set(cleanlist)
14055
14056                 # Check if any of these package are the sole providers of libraries
14057                 # with consumers that have not been selected for removal. If so, these
14058                 # packages and any dependencies need to be added to the graph.
14059                 real_vardb = trees[myroot]["vartree"].dbapi
14060                 linkmap = real_vardb.linkmap
14061                 liblist = linkmap.listLibraryObjects()
14062                 consumer_cache = {}
14063                 provider_cache = {}
14064                 soname_cache = {}
14065                 consumer_map = {}
14066
14067                 writemsg_level(">>> Checking for lib consumers...\n")
14068
14069                 for pkg in cleanlist:
14070                         pkg_dblink = real_vardb._dblink(pkg.cpv)
14071                         provided_libs = set()
14072
14073                         for lib in liblist:
14074                                 if pkg_dblink.isowner(lib, myroot):
14075                                         provided_libs.add(lib)
14076
14077                         if not provided_libs:
14078                                 continue
14079
14080                         consumers = {}
14081                         for lib in provided_libs:
14082                                 lib_consumers = consumer_cache.get(lib)
14083                                 if lib_consumers is None:
14084                                         lib_consumers = linkmap.findConsumers(lib)
14085                                         consumer_cache[lib] = lib_consumers
14086                                 if lib_consumers:
14087                                         consumers[lib] = lib_consumers
14088
14089                         if not consumers:
14090                                 continue
14091
14092                         for lib, lib_consumers in consumers.items():
14093                                 for consumer_file in list(lib_consumers):
14094                                         if pkg_dblink.isowner(consumer_file, myroot):
14095                                                 lib_consumers.remove(consumer_file)
14096                                 if not lib_consumers:
14097                                         del consumers[lib]
14098
14099                         if not consumers:
14100                                 continue
14101
14102                         for lib, lib_consumers in consumers.iteritems():
14103
14104                                 soname = soname_cache.get(lib)
14105                                 if soname is None:
14106                                         soname = linkmap.getSoname(lib)
14107                                         soname_cache[lib] = soname
14108
14109                                 consumer_providers = []
14110                                 for lib_consumer in lib_consumers:
14111                                         providers = provider_cache.get(lib)
14112                                         if providers is None:
14113                                                 providers = linkmap.findProviders(lib_consumer)
14114                                                 provider_cache[lib_consumer] = providers
14115                                         if soname not in providers:
14116                                                 # Why does this happen?
14117                                                 continue
14118                                         consumer_providers.append(
14119                                                 (lib_consumer, providers[soname]))
14120
14121                                 consumers[lib] = consumer_providers
14122
14123                         consumer_map[pkg] = consumers
14124
14125                 if consumer_map:
14126
14127                         search_files = set()
14128                         for consumers in consumer_map.itervalues():
14129                                 for lib, consumer_providers in consumers.iteritems():
14130                                         for lib_consumer, providers in consumer_providers:
14131                                                 search_files.add(lib_consumer)
14132                                                 search_files.update(providers)
14133
14134                         writemsg_level(">>> Assigning files to packages...\n")
14135                         file_owners = real_vardb._owners.getFileOwnerMap(search_files)
14136
14137                         for pkg, consumers in consumer_map.items():
14138                                 for lib, consumer_providers in consumers.items():
14139                                         lib_consumers = set()
14140
14141                                         for lib_consumer, providers in consumer_providers:
14142                                                 owner_set = file_owners.get(lib_consumer)
14143                                                 provider_dblinks = set()
14144                                                 provider_pkgs = set()
14145
14146                                                 if len(providers) > 1:
14147                                                         for provider in providers:
14148                                                                 provider_set = file_owners.get(provider)
14149                                                                 if provider_set is not None:
14150                                                                         provider_dblinks.update(provider_set)
14151
14152                                                 if len(provider_dblinks) > 1:
14153                                                         for provider_dblink in provider_dblinks:
14154                                                                 pkg_key = ("installed", myroot,
14155                                                                         provider_dblink.mycpv, "nomerge")
14156                                                                 if pkg_key not in clean_set:
14157                                                                         provider_pkgs.add(vardb.get(pkg_key))
14158
14159                                                 if provider_pkgs:
14160                                                         continue
14161
14162                                                 if owner_set is not None:
14163                                                         lib_consumers.update(owner_set)
14164
14165                                         for consumer_dblink in list(lib_consumers):
14166                                                 if ("installed", myroot, consumer_dblink.mycpv,
14167                                                         "nomerge") in clean_set:
14168                                                         lib_consumers.remove(consumer_dblink)
14169                                                         continue
14170
14171                                         if lib_consumers:
14172                                                 consumers[lib] = lib_consumers
14173                                         else:
14174                                                 del consumers[lib]
14175                                 if not consumers:
14176                                         del consumer_map[pkg]
14177
14178                 if consumer_map:
14179                         # TODO: Implement a package set for rebuilding consumer packages.
14180
14181                         msg = "In order to avoid breakage of link level " + \
14182                                 "dependencies, one or more packages will not be removed. " + \
14183                                 "This can be solved by rebuilding " + \
14184                                 "the packages that pulled them in."
14185
14186                         prefix = bad(" * ")
14187                         from textwrap import wrap
14188                         writemsg_level("".join(prefix + "%s\n" % line for \
14189                                 line in wrap(msg, 70)), level=logging.WARNING, noiselevel=-1)
14190
14191                         msg = []
14192                         for pkg, consumers in consumer_map.iteritems():
14193                                 unique_consumers = set(chain(*consumers.values()))
14194                                 unique_consumers = sorted(consumer.mycpv \
14195                                         for consumer in unique_consumers)
14196                                 msg.append("")
14197                                 msg.append("  %s pulled in by:" % (pkg.cpv,))
14198                                 for consumer in unique_consumers:
14199                                         msg.append("    %s" % (consumer,))
14200                         msg.append("")
14201                         writemsg_level("".join(prefix + "%s\n" % line for line in msg),
14202                                 level=logging.WARNING, noiselevel=-1)
14203
14204                         # Add lib providers to the graph as children of lib consumers,
14205                         # and also add any dependencies pulled in by the provider.
14206                         writemsg_level(">>> Adding lib providers to graph...\n")
14207
14208                         for pkg, consumers in consumer_map.iteritems():
14209                                 for consumer_dblink in set(chain(*consumers.values())):
14210                                         consumer_pkg = vardb.get(("installed", myroot,
14211                                                 consumer_dblink.mycpv, "nomerge"))
14212                                         if not resolver._add_pkg(pkg,
14213                                                 Dependency(parent=consumer_pkg,
14214                                                 priority=UnmergeDepPriority(runtime=True),
14215                                                 root=pkg.root)):
14216                                                 resolver.display_problems()
14217                                                 return 1
14218
14219                         writemsg_level("\nCalculating dependencies  ")
14220                         success = resolver._complete_graph()
14221                         writemsg_level("\b\b... done!\n")
14222                         resolver.display_problems()
14223                         if not success:
14224                                 return 1
14225                         if unresolved_deps():
14226                                 return 1
14227
14228                         graph = resolver.digraph.copy()
14229                         required_pkgs_total = 0
14230                         for node in graph:
14231                                 if isinstance(node, Package):
14232                                         required_pkgs_total += 1
14233                         cleanlist = create_cleanlist()
14234                         if not cleanlist:
14235                                 return 0
14236                         clean_set = set(cleanlist)
14237
14238                 # Use a topological sort to create an unmerge order such that
14239                 # each package is unmerged before it's dependencies. This is
14240                 # necessary to avoid breaking things that may need to run
14241                 # during pkg_prerm or pkg_postrm phases.
14242
14243                 # Create a new graph to account for dependencies between the
14244                 # packages being unmerged.
14245                 graph = digraph()
14246                 del cleanlist[:]
14247
14248                 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
14249                 runtime = UnmergeDepPriority(runtime=True)
14250                 runtime_post = UnmergeDepPriority(runtime_post=True)
14251                 buildtime = UnmergeDepPriority(buildtime=True)
14252                 priority_map = {
14253                         "RDEPEND": runtime,
14254                         "PDEPEND": runtime_post,
14255                         "DEPEND": buildtime,
14256                 }
14257
14258                 for node in clean_set:
14259                         graph.add(node, None)
14260                         mydeps = []
14261                         node_use = node.metadata["USE"].split()
14262                         for dep_type in dep_keys:
14263                                 depstr = node.metadata[dep_type]
14264                                 if not depstr:
14265                                         continue
14266                                 try:
14267                                         portage.dep._dep_check_strict = False
14268                                         success, atoms = portage.dep_check(depstr, None, settings,
14269                                                 myuse=node_use, trees=resolver._graph_trees,
14270                                                 myroot=myroot)
14271                                 finally:
14272                                         portage.dep._dep_check_strict = True
14273                                 if not success:
14274                                         # Ignore invalid deps of packages that will
14275                                         # be uninstalled anyway.
14276                                         continue
14277
14278                                 priority = priority_map[dep_type]
14279                                 for atom in atoms:
14280                                         if not isinstance(atom, portage.dep.Atom):
14281                                                 # Ignore invalid atoms returned from dep_check().
14282                                                 continue
14283                                         if atom.blocker:
14284                                                 continue
14285                                         matches = vardb.match_pkgs(atom)
14286                                         if not matches:
14287                                                 continue
14288                                         for child_node in matches:
14289                                                 if child_node in clean_set:
14290                                                         graph.add(child_node, node, priority=priority)
14291
14292                 ordered = True
14293                 if len(graph.order) == len(graph.root_nodes()):
14294                         # If there are no dependencies between packages
14295                         # let unmerge() group them by cat/pn.
14296                         ordered = False
14297                         cleanlist = [pkg.cpv for pkg in graph.order]
14298                 else:
14299                         # Order nodes from lowest to highest overall reference count for
14300                         # optimal root node selection.
14301                         node_refcounts = {}
14302                         for node in graph.order:
14303                                 node_refcounts[node] = len(graph.parent_nodes(node))
14304                         def cmp_reference_count(node1, node2):
14305                                 return node_refcounts[node1] - node_refcounts[node2]
14306                         graph.order.sort(key=cmp_sort_key(cmp_reference_count))
14307         
14308                         ignore_priority_range = [None]
14309                         ignore_priority_range.extend(
14310                                 xrange(UnmergeDepPriority.MIN, UnmergeDepPriority.MAX + 1))
14311                         while not graph.empty():
14312                                 for ignore_priority in ignore_priority_range:
14313                                         nodes = graph.root_nodes(ignore_priority=ignore_priority)
14314                                         if nodes:
14315                                                 break
14316                                 if not nodes:
14317                                         raise AssertionError("no root nodes")
14318                                 if ignore_priority is not None:
14319                                         # Some deps have been dropped due to circular dependencies,
14320                                         # so only pop one node in order do minimize the number that
14321                                         # are dropped.
14322                                         del nodes[1:]
14323                                 for node in nodes:
14324                                         graph.remove(node)
14325                                         cleanlist.append(node.cpv)
14326
14327                 unmerge(root_config, myopts, "unmerge", cleanlist,
14328                         ldpath_mtimes, ordered=ordered)
14329
14330         if action == "prune":
14331                 return
14332
14333         if not cleanlist and "--quiet" in myopts:
14334                 return
14335
14336         print "Packages installed:   "+str(len(vardb.cpv_all()))
14337         print "Packages in world:    " + \
14338                 str(len(root_config.sets["world"].getAtoms()))
14339         print "Packages in system:   " + \
14340                 str(len(root_config.sets["system"].getAtoms()))
14341         print "Required packages:    "+str(required_pkgs_total)
14342         if "--pretend" in myopts:
14343                 print "Number to remove:     "+str(len(cleanlist))
14344         else:
14345                 print "Number removed:       "+str(len(cleanlist))
14346
14347 def resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
14348         """
14349         Construct a depgraph for the given resume list. This will raise
14350         PackageNotFound or depgraph.UnsatisfiedResumeDep when necessary.
14351         @rtype: tuple
14352         @returns: (success, depgraph, dropped_tasks)
14353         """
14354         skip_masked = True
14355         skip_unsatisfied = True
14356         mergelist = mtimedb["resume"]["mergelist"]
14357         dropped_tasks = set()
14358         while True:
14359                 mydepgraph = depgraph(settings, trees,
14360                         myopts, myparams, spinner)
14361                 try:
14362                         success = mydepgraph.loadResumeCommand(mtimedb["resume"],
14363                                 skip_masked=skip_masked)
14364                 except depgraph.UnsatisfiedResumeDep, e:
14365                         if not skip_unsatisfied:
14366                                 raise
14367
14368                         graph = mydepgraph.digraph
14369                         unsatisfied_parents = dict((dep.parent, dep.parent) \
14370                                 for dep in e.value)
14371                         traversed_nodes = set()
14372                         unsatisfied_stack = list(unsatisfied_parents)
14373                         while unsatisfied_stack:
14374                                 pkg = unsatisfied_stack.pop()
14375                                 if pkg in traversed_nodes:
14376                                         continue
14377                                 traversed_nodes.add(pkg)
14378
14379                                 # If this package was pulled in by a parent
14380                                 # package scheduled for merge, removing this
14381                                 # package may cause the the parent package's
14382                                 # dependency to become unsatisfied.
14383                                 for parent_node in graph.parent_nodes(pkg):
14384                                         if not isinstance(parent_node, Package) \
14385                                                 or parent_node.operation not in ("merge", "nomerge"):
14386                                                 continue
14387                                         unsatisfied = \
14388                                                 graph.child_nodes(parent_node,
14389                                                 ignore_priority=DepPrioritySatisfiedRange.ignore_soft)
14390                                         if pkg in unsatisfied:
14391                                                 unsatisfied_parents[parent_node] = parent_node
14392                                                 unsatisfied_stack.append(parent_node)
14393
14394                         pruned_mergelist = []
14395                         for x in mergelist:
14396                                 if isinstance(x, list) and \
14397                                         tuple(x) not in unsatisfied_parents:
14398                                         pruned_mergelist.append(x)
14399
14400                         # If the mergelist doesn't shrink then this loop is infinite.
14401                         if len(pruned_mergelist) == len(mergelist):
14402                                 # This happens if a package can't be dropped because
14403                                 # it's already installed, but it has unsatisfied PDEPEND.
14404                                 raise
14405                         mergelist[:] = pruned_mergelist
14406
14407                         # Exclude installed packages that have been removed from the graph due
14408                         # to failure to build/install runtime dependencies after the dependent
14409                         # package has already been installed.
14410                         dropped_tasks.update(pkg for pkg in \
14411                                 unsatisfied_parents if pkg.operation != "nomerge")
14412                         mydepgraph.break_refs(unsatisfied_parents)
14413
14414                         del e, graph, traversed_nodes, \
14415                                 unsatisfied_parents, unsatisfied_stack
14416                         continue
14417                 else:
14418                         break
14419         return (success, mydepgraph, dropped_tasks)
14420
14421 def action_build(settings, trees, mtimedb,
14422         myopts, myaction, myfiles, spinner):
14423
14424         # validate the state of the resume data
14425         # so that we can make assumptions later.
14426         for k in ("resume", "resume_backup"):
14427                 if k not in mtimedb:
14428                         continue
14429                 resume_data = mtimedb[k]
14430                 if not isinstance(resume_data, dict):
14431                         del mtimedb[k]
14432                         continue
14433                 mergelist = resume_data.get("mergelist")
14434                 if not isinstance(mergelist, list):
14435                         del mtimedb[k]
14436                         continue
14437                 for x in mergelist:
14438                         if not (isinstance(x, list) and len(x) == 4):
14439                                 continue
14440                         pkg_type, pkg_root, pkg_key, pkg_action = x
14441                         if pkg_root not in trees:
14442                                 # Current $ROOT setting differs,
14443                                 # so the list must be stale.
14444                                 mergelist = None
14445                                 break
14446                 if not mergelist:
14447                         del mtimedb[k]
14448                         continue
14449                 resume_opts = resume_data.get("myopts")
14450                 if not isinstance(resume_opts, (dict, list)):
14451                         del mtimedb[k]
14452                         continue
14453                 favorites = resume_data.get("favorites")
14454                 if not isinstance(favorites, list):
14455                         del mtimedb[k]
14456                         continue
14457
14458         resume = False
14459         if "--resume" in myopts and \
14460                 ("resume" in mtimedb or
14461                 "resume_backup" in mtimedb):
14462                 resume = True
14463                 if "resume" not in mtimedb:
14464                         mtimedb["resume"] = mtimedb["resume_backup"]
14465                         del mtimedb["resume_backup"]
14466                         mtimedb.commit()
14467                 # "myopts" is a list for backward compatibility.
14468                 resume_opts = mtimedb["resume"].get("myopts", [])
14469                 if isinstance(resume_opts, list):
14470                         resume_opts = dict((k,True) for k in resume_opts)
14471                 for opt in ("--ask", "--color", "--skipfirst", "--tree"):
14472                         resume_opts.pop(opt, None)
14473                 myopts.update(resume_opts)
14474
14475                 if "--debug" in myopts:
14476                         writemsg_level("myopts %s\n" % (myopts,))
14477
14478                 # Adjust config according to options of the command being resumed.
14479                 for myroot in trees:
14480                         mysettings =  trees[myroot]["vartree"].settings
14481                         mysettings.unlock()
14482                         adjust_config(myopts, mysettings)
14483                         mysettings.lock()
14484                         del myroot, mysettings
14485
14486         ldpath_mtimes = mtimedb["ldpath"]
14487         favorites=[]
14488         merge_count = 0
14489         buildpkgonly = "--buildpkgonly" in myopts
14490         pretend = "--pretend" in myopts
14491         fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
14492         ask = "--ask" in myopts
14493         nodeps = "--nodeps" in myopts
14494         oneshot = "--oneshot" in myopts or "--onlydeps" in myopts
14495         tree = "--tree" in myopts
14496         if nodeps and tree:
14497                 tree = False
14498                 del myopts["--tree"]
14499                 portage.writemsg(colorize("WARN", " * ") + \
14500                         "--tree is broken with --nodeps. Disabling...\n")
14501         debug = "--debug" in myopts
14502         verbose = "--verbose" in myopts
14503         quiet = "--quiet" in myopts
14504         if pretend or fetchonly:
14505                 # make the mtimedb readonly
14506                 mtimedb.filename = None
14507         if '--digest' in myopts or 'digest' in settings.features:
14508                 if '--digest' in myopts:
14509                         msg = "The --digest option"
14510                 else:
14511                         msg = "The FEATURES=digest setting"
14512
14513                 msg += " can prevent corruption from being" + \
14514                         " noticed. The `repoman manifest` command is the preferred" + \
14515                         " way to generate manifests and it is capable of doing an" + \
14516                         " entire repository or category at once."
14517                 prefix = bad(" * ")
14518                 writemsg(prefix + "\n")
14519                 from textwrap import wrap
14520                 for line in wrap(msg, 72):
14521                         writemsg("%s%s\n" % (prefix, line))
14522                 writemsg(prefix + "\n")
14523
14524         if "--quiet" not in myopts and \
14525                 ("--pretend" in myopts or "--ask" in myopts or \
14526                 "--tree" in myopts or "--verbose" in myopts):
14527                 action = ""
14528                 if "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
14529                         action = "fetched"
14530                 elif "--buildpkgonly" in myopts:
14531                         action = "built"
14532                 else:
14533                         action = "merged"
14534                 if "--tree" in myopts and action != "fetched": # Tree doesn't work with fetching
14535                         print
14536                         print darkgreen("These are the packages that would be %s, in reverse order:") % action
14537                         print
14538                 else:
14539                         print
14540                         print darkgreen("These are the packages that would be %s, in order:") % action
14541                         print
14542
14543         show_spinner = "--quiet" not in myopts and "--nodeps" not in myopts
14544         if not show_spinner:
14545                 spinner.update = spinner.update_quiet
14546
14547         if resume:
14548                 favorites = mtimedb["resume"].get("favorites")
14549                 if not isinstance(favorites, list):
14550                         favorites = []
14551
14552                 if show_spinner:
14553                         print "Calculating dependencies  ",
14554                 myparams = create_depgraph_params(myopts, myaction)
14555
14556                 resume_data = mtimedb["resume"]
14557                 mergelist = resume_data["mergelist"]
14558                 if mergelist and "--skipfirst" in myopts:
14559                         for i, task in enumerate(mergelist):
14560                                 if isinstance(task, list) and \
14561                                         task and task[-1] == "merge":
14562                                         del mergelist[i]
14563                                         break
14564
14565                 success = False
14566                 mydepgraph = None
14567                 try:
14568                         success, mydepgraph, dropped_tasks = resume_depgraph(
14569                                 settings, trees, mtimedb, myopts, myparams, spinner)
14570                 except (portage.exception.PackageNotFound,
14571                         depgraph.UnsatisfiedResumeDep), e:
14572                         if isinstance(e, depgraph.UnsatisfiedResumeDep):
14573                                 mydepgraph = e.depgraph
14574                         if show_spinner:
14575                                 print
14576                         from textwrap import wrap
14577                         from portage.output import EOutput
14578                         out = EOutput()
14579
14580                         resume_data = mtimedb["resume"]
14581                         mergelist = resume_data.get("mergelist")
14582                         if not isinstance(mergelist, list):
14583                                 mergelist = []
14584                         if mergelist and debug or (verbose and not quiet):
14585                                 out.eerror("Invalid resume list:")
14586                                 out.eerror("")
14587                                 indent = "  "
14588                                 for task in mergelist:
14589                                         if isinstance(task, list):
14590                                                 out.eerror(indent + str(tuple(task)))
14591                                 out.eerror("")
14592
14593                         if isinstance(e, depgraph.UnsatisfiedResumeDep):
14594                                 out.eerror("One or more packages are either masked or " + \
14595                                         "have missing dependencies:")
14596                                 out.eerror("")
14597                                 indent = "  "
14598                                 for dep in e.value:
14599                                         if dep.atom is None:
14600                                                 out.eerror(indent + "Masked package:")
14601                                                 out.eerror(2 * indent + str(dep.parent))
14602                                                 out.eerror("")
14603                                         else:
14604                                                 out.eerror(indent + str(dep.atom) + " pulled in by:")
14605                                                 out.eerror(2 * indent + str(dep.parent))
14606                                                 out.eerror("")
14607                                 msg = "The resume list contains packages " + \
14608                                         "that are either masked or have " + \
14609                                         "unsatisfied dependencies. " + \
14610                                         "Please restart/continue " + \
14611                                         "the operation manually, or use --skipfirst " + \
14612                                         "to skip the first package in the list and " + \
14613                                         "any other packages that may be " + \
14614                                         "masked or have missing dependencies."
14615                                 for line in wrap(msg, 72):
14616                                         out.eerror(line)
14617                         elif isinstance(e, portage.exception.PackageNotFound):
14618                                 out.eerror("An expected package is " + \
14619                                         "not available: %s" % str(e))
14620                                 out.eerror("")
14621                                 msg = "The resume list contains one or more " + \
14622                                         "packages that are no longer " + \
14623                                         "available. Please restart/continue " + \
14624                                         "the operation manually."
14625                                 for line in wrap(msg, 72):
14626                                         out.eerror(line)
14627                 else:
14628                         if show_spinner:
14629                                 print "\b\b... done!"
14630
14631                 if success:
14632                         if dropped_tasks:
14633                                 portage.writemsg("!!! One or more packages have been " + \
14634                                         "dropped due to\n" + \
14635                                         "!!! masking or unsatisfied dependencies:\n\n",
14636                                         noiselevel=-1)
14637                                 for task in dropped_tasks:
14638                                         portage.writemsg("  " + str(task) + "\n", noiselevel=-1)
14639                                 portage.writemsg("\n", noiselevel=-1)
14640                         del dropped_tasks
14641                 else:
14642                         if mydepgraph is not None:
14643                                 mydepgraph.display_problems()
14644                         if not (ask or pretend):
14645                                 # delete the current list and also the backup
14646                                 # since it's probably stale too.
14647                                 for k in ("resume", "resume_backup"):
14648                                         mtimedb.pop(k, None)
14649                                 mtimedb.commit()
14650
14651                         return 1
14652         else:
14653                 if ("--resume" in myopts):
14654                         print darkgreen("emerge: It seems we have nothing to resume...")
14655                         return os.EX_OK
14656
14657                 myparams = create_depgraph_params(myopts, myaction)
14658                 if "--quiet" not in myopts and "--nodeps" not in myopts:
14659                         print "Calculating dependencies  ",
14660                         sys.stdout.flush()
14661                 mydepgraph = depgraph(settings, trees, myopts, myparams, spinner)
14662                 try:
14663                         retval, favorites = mydepgraph.select_files(myfiles)
14664                 except portage.exception.PackageNotFound, e:
14665                         portage.writemsg("\n!!! %s\n" % str(e), noiselevel=-1)
14666                         return 1
14667                 except portage.exception.PackageSetNotFound, e:
14668                         root_config = trees[settings["ROOT"]]["root_config"]
14669                         display_missing_pkg_set(root_config, e.value)
14670                         return 1
14671                 if show_spinner:
14672                         print "\b\b... done!"
14673                 if not retval:
14674                         mydepgraph.display_problems()
14675                         return 1
14676
14677         if "--pretend" not in myopts and \
14678                 ("--ask" in myopts or "--tree" in myopts or \
14679                 "--verbose" in myopts) and \
14680                 not ("--quiet" in myopts and "--ask" not in myopts):
14681                 if "--resume" in myopts:
14682                         mymergelist = mydepgraph.altlist()
14683                         if len(mymergelist) == 0:
14684                                 print colorize("INFORM", "emerge: It seems we have nothing to resume...")
14685                                 return os.EX_OK
14686                         favorites = mtimedb["resume"]["favorites"]
14687                         retval = mydepgraph.display(
14688                                 mydepgraph.altlist(reversed=tree),
14689                                 favorites=favorites)
14690                         mydepgraph.display_problems()
14691                         if retval != os.EX_OK:
14692                                 return retval
14693                         prompt="Would you like to resume merging these packages?"
14694                 else:
14695                         retval = mydepgraph.display(
14696                                 mydepgraph.altlist(reversed=("--tree" in myopts)),
14697                                 favorites=favorites)
14698                         mydepgraph.display_problems()
14699                         if retval != os.EX_OK:
14700                                 return retval
14701                         mergecount=0
14702                         for x in mydepgraph.altlist():
14703                                 if isinstance(x, Package) and x.operation == "merge":
14704                                         mergecount += 1
14705
14706                         if mergecount==0:
14707                                 sets = trees[settings["ROOT"]]["root_config"].sets
14708                                 world_candidates = None
14709                                 if "--noreplace" in myopts and \
14710                                         not oneshot and favorites:
14711                                         # Sets that are not world candidates are filtered
14712                                         # out here since the favorites list needs to be
14713                                         # complete for depgraph.loadResumeCommand() to
14714                                         # operate correctly.
14715                                         world_candidates = [x for x in favorites \
14716                                                 if not (x.startswith(SETPREFIX) and \
14717                                                 not sets[x[1:]].world_candidate)]
14718                                 if "--noreplace" in myopts and \
14719                                         not oneshot and world_candidates:
14720                                         print
14721                                         for x in world_candidates:
14722                                                 print " %s %s" % (good("*"), x)
14723                                         prompt="Would you like to add these packages to your world favorites?"
14724                                 elif settings["AUTOCLEAN"] and "yes"==settings["AUTOCLEAN"]:
14725                                         prompt="Nothing to merge; would you like to auto-clean packages?"
14726                                 else:
14727                                         print
14728                                         print "Nothing to merge; quitting."
14729                                         print
14730                                         return os.EX_OK
14731                         elif "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
14732                                 prompt="Would you like to fetch the source files for these packages?"
14733                         else:
14734                                 prompt="Would you like to merge these packages?"
14735                 print
14736                 if "--ask" in myopts and userquery(prompt) == "No":
14737                         print
14738                         print "Quitting."
14739                         print
14740                         return os.EX_OK
14741                 # Don't ask again (e.g. when auto-cleaning packages after merge)
14742                 myopts.pop("--ask", None)
14743
14744         if ("--pretend" in myopts) and not ("--fetchonly" in myopts or "--fetch-all-uri" in myopts):
14745                 if ("--resume" in myopts):
14746                         mymergelist = mydepgraph.altlist()
14747                         if len(mymergelist) == 0:
14748                                 print colorize("INFORM", "emerge: It seems we have nothing to resume...")
14749                                 return os.EX_OK
14750                         favorites = mtimedb["resume"]["favorites"]
14751                         retval = mydepgraph.display(
14752                                 mydepgraph.altlist(reversed=tree),
14753                                 favorites=favorites)
14754                         mydepgraph.display_problems()
14755                         if retval != os.EX_OK:
14756                                 return retval
14757                 else:
14758                         retval = mydepgraph.display(
14759                                 mydepgraph.altlist(reversed=("--tree" in myopts)),
14760                                 favorites=favorites)
14761                         mydepgraph.display_problems()
14762                         if retval != os.EX_OK:
14763                                 return retval
14764                         if "--buildpkgonly" in myopts:
14765                                 graph_copy = mydepgraph.digraph.clone()
14766                                 removed_nodes = set()
14767                                 for node in graph_copy:
14768                                         if not isinstance(node, Package) or \
14769                                                 node.operation == "nomerge":
14770                                                 removed_nodes.add(node)
14771                                 graph_copy.difference_update(removed_nodes)
14772                                 if not graph_copy.hasallzeros(ignore_priority = \
14773                                         DepPrioritySatisfiedRange.ignore_medium):
14774                                         print "\n!!! --buildpkgonly requires all dependencies to be merged."
14775                                         print "!!! You have to merge the dependencies before you can build this package.\n"
14776                                         return 1
14777         else:
14778                 if "--buildpkgonly" in myopts:
14779                         graph_copy = mydepgraph.digraph.clone()
14780                         removed_nodes = set()
14781                         for node in graph_copy:
14782                                 if not isinstance(node, Package) or \
14783                                         node.operation == "nomerge":
14784                                         removed_nodes.add(node)
14785                         graph_copy.difference_update(removed_nodes)
14786                         if not graph_copy.hasallzeros(ignore_priority = \
14787                                 DepPrioritySatisfiedRange.ignore_medium):
14788                                 print "\n!!! --buildpkgonly requires all dependencies to be merged."
14789                                 print "!!! Cannot merge requested packages. Merge deps and try again.\n"
14790                                 return 1
14791
14792                 if ("--resume" in myopts):
14793                         favorites=mtimedb["resume"]["favorites"]
14794                         mymergelist = mydepgraph.altlist()
14795                         mydepgraph.break_refs(mymergelist)
14796                         mergetask = Scheduler(settings, trees, mtimedb, myopts,
14797                                 spinner, mymergelist, favorites, mydepgraph.schedulerGraph())
14798                         del mydepgraph, mymergelist
14799                         clear_caches(trees)
14800
14801                         retval = mergetask.merge()
14802                         merge_count = mergetask.curval
14803                 else:
14804                         if "resume" in mtimedb and \
14805                         "mergelist" in mtimedb["resume"] and \
14806                         len(mtimedb["resume"]["mergelist"]) > 1:
14807                                 mtimedb["resume_backup"] = mtimedb["resume"]
14808                                 del mtimedb["resume"]
14809                                 mtimedb.commit()
14810                         mtimedb["resume"]={}
14811                         # Stored as a dict starting with portage-2.1.6_rc1, and supported
14812                         # by >=portage-2.1.3_rc8. Versions <portage-2.1.3_rc8 only support
14813                         # a list type for options.
14814                         mtimedb["resume"]["myopts"] = myopts.copy()
14815
14816                         # Convert Atom instances to plain str.
14817                         mtimedb["resume"]["favorites"] = [str(x) for x in favorites]
14818
14819                         pkglist = mydepgraph.altlist()
14820                         mydepgraph.saveNomergeFavorites()
14821                         mydepgraph.break_refs(pkglist)
14822                         mergetask = Scheduler(settings, trees, mtimedb, myopts,
14823                                 spinner, pkglist, favorites, mydepgraph.schedulerGraph())
14824                         del mydepgraph, pkglist
14825                         clear_caches(trees)
14826
14827                         retval = mergetask.merge()
14828                         merge_count = mergetask.curval
14829
14830                 if retval == os.EX_OK and not (buildpkgonly or fetchonly or pretend):
14831                         if "yes" == settings.get("AUTOCLEAN"):
14832                                 portage.writemsg_stdout(">>> Auto-cleaning packages...\n")
14833                                 unmerge(trees[settings["ROOT"]]["root_config"],
14834                                         myopts, "clean", [],
14835                                         ldpath_mtimes, autoclean=1)
14836                         else:
14837                                 portage.writemsg_stdout(colorize("WARN", "WARNING:")
14838                                         + " AUTOCLEAN is disabled.  This can cause serious"
14839                                         + " problems due to overlapping packages.\n")
14840                         trees[settings["ROOT"]]["vartree"].dbapi.plib_registry.pruneNonExisting()
14841
14842                 return retval
14843
14844 def multiple_actions(action1, action2):
14845         sys.stderr.write("\n!!! Multiple actions requested... Please choose one only.\n")
14846         sys.stderr.write("!!! '%s' or '%s'\n\n" % (action1, action2))
14847         sys.exit(1)
14848
14849 def insert_optional_args(args):
14850         """
14851         Parse optional arguments and insert a value if one has
14852         not been provided. This is done before feeding the args
14853         to the optparse parser since that parser does not support
14854         this feature natively.
14855         """
14856
14857         new_args = []
14858         jobs_opts = ("-j", "--jobs")
14859         root_deps_opt = '--root-deps'
14860         root_deps_choices = ('True', 'rdeps')
14861         arg_stack = args[:]
14862         arg_stack.reverse()
14863         while arg_stack:
14864                 arg = arg_stack.pop()
14865
14866                 if arg == root_deps_opt:
14867                         new_args.append(arg)
14868                         if arg_stack and arg_stack[-1] in root_deps_choices:
14869                                 new_args.append(arg_stack.pop())
14870                         else:
14871                                 # insert default argument
14872                                 new_args.append('True')
14873                         continue
14874
14875                 short_job_opt = bool("j" in arg and arg[:1] == "-" and arg[:2] != "--")
14876                 if not (short_job_opt or arg in jobs_opts):
14877                         new_args.append(arg)
14878                         continue
14879
14880                 # Insert an empty placeholder in order to
14881                 # satisfy the requirements of optparse.
14882
14883                 new_args.append("--jobs")
14884                 job_count = None
14885                 saved_opts = None
14886                 if short_job_opt and len(arg) > 2:
14887                         if arg[:2] == "-j":
14888                                 try:
14889                                         job_count = int(arg[2:])
14890                                 except ValueError:
14891                                         saved_opts = arg[2:]
14892                         else:
14893                                 job_count = "True"
14894                                 saved_opts = arg[1:].replace("j", "")
14895
14896                 if job_count is None and arg_stack:
14897                         try:
14898                                 job_count = int(arg_stack[-1])
14899                         except ValueError:
14900                                 pass
14901                         else:
14902                                 # Discard the job count from the stack
14903                                 # since we're consuming it here.
14904                                 arg_stack.pop()
14905
14906                 if job_count is None:
14907                         # unlimited number of jobs
14908                         new_args.append("True")
14909                 else:
14910                         new_args.append(str(job_count))
14911
14912                 if saved_opts is not None:
14913                         new_args.append("-" + saved_opts)
14914
14915         return new_args
14916
14917 def parse_opts(tmpcmdline, silent=False):
14918         myaction=None
14919         myopts = {}
14920         myfiles=[]
14921
14922         global actions, options, shortmapping
14923
14924         longopt_aliases = {"--cols":"--columns", "--skip-first":"--skipfirst"}
14925         argument_options = {
14926                 "--config-root": {
14927                         "help":"specify the location for portage configuration files",
14928                         "action":"store"
14929                 },
14930                 "--color": {
14931                         "help":"enable or disable color output",
14932                         "type":"choice",
14933                         "choices":("y", "n")
14934                 },
14935
14936                 "--jobs": {
14937
14938                         "help"   : "Specifies the number of packages to build " + \
14939                                 "simultaneously.",
14940
14941                         "action" : "store"
14942                 },
14943
14944                 "--load-average": {
14945
14946                         "help"   :"Specifies that no new builds should be started " + \
14947                                 "if there are other builds running and the load average " + \
14948                                 "is at least LOAD (a floating-point number).",
14949
14950                         "action" : "store"
14951                 },
14952
14953                 "--with-bdeps": {
14954                         "help":"include unnecessary build time dependencies",
14955                         "type":"choice",
14956                         "choices":("y", "n")
14957                 },
14958                 "--reinstall": {
14959                         "help":"specify conditions to trigger package reinstallation",
14960                         "type":"choice",
14961                         "choices":["changed-use"]
14962                 },
14963                 "--root": {
14964                  "help"   : "specify the target root filesystem for merging packages",
14965                  "action" : "store"
14966                 },
14967
14968                 "--root-deps": {
14969                         "help"    : "modify interpretation of depedencies",
14970                         "type"    : "choice",
14971                         "choices" :("True", "rdeps")
14972                 },
14973         }
14974
14975         from optparse import OptionParser
14976         parser = OptionParser()
14977         if parser.has_option("--help"):
14978                 parser.remove_option("--help")
14979
14980         for action_opt in actions:
14981                 parser.add_option("--" + action_opt, action="store_true",
14982                         dest=action_opt.replace("-", "_"), default=False)
14983         for myopt in options:
14984                 parser.add_option(myopt, action="store_true",
14985                         dest=myopt.lstrip("--").replace("-", "_"), default=False)
14986         for shortopt, longopt in shortmapping.iteritems():
14987                 parser.add_option("-" + shortopt, action="store_true",
14988                         dest=longopt.lstrip("--").replace("-", "_"), default=False)
14989         for myalias, myopt in longopt_aliases.iteritems():
14990                 parser.add_option(myalias, action="store_true",
14991                         dest=myopt.lstrip("--").replace("-", "_"), default=False)
14992
14993         for myopt, kwargs in argument_options.iteritems():
14994                 parser.add_option(myopt,
14995                         dest=myopt.lstrip("--").replace("-", "_"), **kwargs)
14996
14997         tmpcmdline = insert_optional_args(tmpcmdline)
14998
14999         myoptions, myargs = parser.parse_args(args=tmpcmdline)
15000
15001         if myoptions.root_deps == "True":
15002                 myoptions.root_deps = True
15003
15004         if myoptions.jobs:
15005                 jobs = None
15006                 if myoptions.jobs == "True":
15007                         jobs = True
15008                 else:
15009                         try:
15010                                 jobs = int(myoptions.jobs)
15011                         except ValueError:
15012                                 jobs = -1
15013
15014                 if jobs is not True and \
15015                         jobs < 1:
15016                         jobs = None
15017                         if not silent:
15018                                 writemsg("!!! Invalid --jobs parameter: '%s'\n" % \
15019                                         (myoptions.jobs,), noiselevel=-1)
15020
15021                 myoptions.jobs = jobs
15022
15023         if myoptions.load_average:
15024                 try:
15025                         load_average = float(myoptions.load_average)
15026                 except ValueError:
15027                         load_average = 0.0
15028
15029                 if load_average <= 0.0:
15030                         load_average = None
15031                         if not silent:
15032                                 writemsg("!!! Invalid --load-average parameter: '%s'\n" % \
15033                                         (myoptions.load_average,), noiselevel=-1)
15034
15035                 myoptions.load_average = load_average
15036
15037         for myopt in options:
15038                 v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"))
15039                 if v:
15040                         myopts[myopt] = True
15041
15042         for myopt in argument_options:
15043                 v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"), None)
15044                 if v is not None:
15045                         myopts[myopt] = v
15046
15047         if myoptions.searchdesc:
15048                 myoptions.search = True
15049
15050         for action_opt in actions:
15051                 v = getattr(myoptions, action_opt.replace("-", "_"))
15052                 if v:
15053                         if myaction:
15054                                 multiple_actions(myaction, action_opt)
15055                                 sys.exit(1)
15056                         myaction = action_opt
15057
15058         myfiles += myargs
15059
15060         return myaction, myopts, myfiles
15061
15062 def validate_ebuild_environment(trees):
15063         for myroot in trees:
15064                 settings = trees[myroot]["vartree"].settings
15065                 settings.validate()
15066
15067 def clear_caches(trees):
15068         for d in trees.itervalues():
15069                 d["porttree"].dbapi.melt()
15070                 d["porttree"].dbapi._aux_cache.clear()
15071                 d["bintree"].dbapi._aux_cache.clear()
15072                 d["bintree"].dbapi._clear_cache()
15073                 d["vartree"].dbapi.linkmap._clear_cache()
15074         portage.dircache.clear()
15075         gc.collect()
15076
15077 def load_emerge_config(trees=None):
15078         kwargs = {}
15079         for k, envvar in (("config_root", "PORTAGE_CONFIGROOT"), ("target_root", "ROOT")):
15080                 v = os.environ.get(envvar, None)
15081                 if v and v.strip():
15082                         kwargs[k] = v
15083         trees = portage.create_trees(trees=trees, **kwargs)
15084
15085         for root, root_trees in trees.iteritems():
15086                 settings = root_trees["vartree"].settings
15087                 setconfig = load_default_config(settings, root_trees)
15088                 root_trees["root_config"] = RootConfig(settings, root_trees, setconfig)
15089
15090         settings = trees["/"]["vartree"].settings
15091
15092         for myroot in trees:
15093                 if myroot != "/":
15094                         settings = trees[myroot]["vartree"].settings
15095                         break
15096
15097         mtimedbfile = os.path.join("/", portage.CACHE_PATH.lstrip(os.path.sep), "mtimedb")
15098         mtimedb = portage.MtimeDB(mtimedbfile)
15099         
15100         return settings, trees, mtimedb
15101
15102 def adjust_config(myopts, settings):
15103         """Make emerge specific adjustments to the config."""
15104
15105         # To enhance usability, make some vars case insensitive by forcing them to
15106         # lower case.
15107         for myvar in ("AUTOCLEAN", "NOCOLOR"):
15108                 if myvar in settings:
15109                         settings[myvar] = settings[myvar].lower()
15110                         settings.backup_changes(myvar)
15111         del myvar
15112
15113         # Kill noauto as it will break merges otherwise.
15114         if "noauto" in settings.features:
15115                 settings.features.remove('noauto')
15116                 settings['FEATURES'] = ' '.join(sorted(settings.features))
15117                 settings.backup_changes("FEATURES")
15118
15119         CLEAN_DELAY = 5
15120         try:
15121                 CLEAN_DELAY = int(settings.get("CLEAN_DELAY", str(CLEAN_DELAY)))
15122         except ValueError, e:
15123                 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
15124                 portage.writemsg("!!! Unable to parse integer: CLEAN_DELAY='%s'\n" % \
15125                         settings["CLEAN_DELAY"], noiselevel=-1)
15126         settings["CLEAN_DELAY"] = str(CLEAN_DELAY)
15127         settings.backup_changes("CLEAN_DELAY")
15128
15129         EMERGE_WARNING_DELAY = 10
15130         try:
15131                 EMERGE_WARNING_DELAY = int(settings.get(
15132                         "EMERGE_WARNING_DELAY", str(EMERGE_WARNING_DELAY)))
15133         except ValueError, e:
15134                 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
15135                 portage.writemsg("!!! Unable to parse integer: EMERGE_WARNING_DELAY='%s'\n" % \
15136                         settings["EMERGE_WARNING_DELAY"], noiselevel=-1)
15137         settings["EMERGE_WARNING_DELAY"] = str(EMERGE_WARNING_DELAY)
15138         settings.backup_changes("EMERGE_WARNING_DELAY")
15139
15140         if "--quiet" in myopts:
15141                 settings["PORTAGE_QUIET"]="1"
15142                 settings.backup_changes("PORTAGE_QUIET")
15143
15144         if "--verbose" in myopts:
15145                 settings["PORTAGE_VERBOSE"] = "1"
15146                 settings.backup_changes("PORTAGE_VERBOSE")
15147
15148         # Set so that configs will be merged regardless of remembered status
15149         if ("--noconfmem" in myopts):
15150                 settings["NOCONFMEM"]="1"
15151                 settings.backup_changes("NOCONFMEM")
15152
15153         # Set various debug markers... They should be merged somehow.
15154         PORTAGE_DEBUG = 0
15155         try:
15156                 PORTAGE_DEBUG = int(settings.get("PORTAGE_DEBUG", str(PORTAGE_DEBUG)))
15157                 if PORTAGE_DEBUG not in (0, 1):
15158                         portage.writemsg("!!! Invalid value: PORTAGE_DEBUG='%i'\n" % \
15159                                 PORTAGE_DEBUG, noiselevel=-1)
15160                         portage.writemsg("!!! PORTAGE_DEBUG must be either 0 or 1\n",
15161                                 noiselevel=-1)
15162                         PORTAGE_DEBUG = 0
15163         except ValueError, e:
15164                 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
15165                 portage.writemsg("!!! Unable to parse integer: PORTAGE_DEBUG='%s'\n" %\
15166                         settings["PORTAGE_DEBUG"], noiselevel=-1)
15167                 del e
15168         if "--debug" in myopts:
15169                 PORTAGE_DEBUG = 1
15170         settings["PORTAGE_DEBUG"] = str(PORTAGE_DEBUG)
15171         settings.backup_changes("PORTAGE_DEBUG")
15172
15173         if settings.get("NOCOLOR") not in ("yes","true"):
15174                 portage.output.havecolor = 1
15175
15176         """The explicit --color < y | n > option overrides the NOCOLOR environment
15177         variable and stdout auto-detection."""
15178         if "--color" in myopts:
15179                 if "y" == myopts["--color"]:
15180                         portage.output.havecolor = 1
15181                         settings["NOCOLOR"] = "false"
15182                 else:
15183                         portage.output.havecolor = 0
15184                         settings["NOCOLOR"] = "true"
15185                 settings.backup_changes("NOCOLOR")
15186         elif not sys.stdout.isatty() and settings.get("NOCOLOR") != "no":
15187                 portage.output.havecolor = 0
15188                 settings["NOCOLOR"] = "true"
15189                 settings.backup_changes("NOCOLOR")
15190
15191 def apply_priorities(settings):
15192         ionice(settings)
15193         nice(settings)
15194
15195 def nice(settings):
15196         try:
15197                 os.nice(int(settings.get("PORTAGE_NICENESS", "0")))
15198         except (OSError, ValueError), e:
15199                 out = portage.output.EOutput()
15200                 out.eerror("Failed to change nice value to '%s'" % \
15201                         settings["PORTAGE_NICENESS"])
15202                 out.eerror("%s\n" % str(e))
15203
15204 def ionice(settings):
15205
15206         ionice_cmd = settings.get("PORTAGE_IONICE_COMMAND")
15207         if ionice_cmd:
15208                 ionice_cmd = shlex.split(ionice_cmd)
15209         if not ionice_cmd:
15210                 return
15211
15212         from portage.util import varexpand
15213         variables = {"PID" : str(os.getpid())}
15214         cmd = [varexpand(x, mydict=variables) for x in ionice_cmd]
15215
15216         try:
15217                 rval = portage.process.spawn(cmd, env=os.environ)
15218         except portage.exception.CommandNotFound:
15219                 # The OS kernel probably doesn't support ionice,
15220                 # so return silently.
15221                 return
15222
15223         if rval != os.EX_OK:
15224                 out = portage.output.EOutput()
15225                 out.eerror("PORTAGE_IONICE_COMMAND returned %d" % (rval,))
15226                 out.eerror("See the make.conf(5) man page for PORTAGE_IONICE_COMMAND usage instructions.")
15227
15228 def display_missing_pkg_set(root_config, set_name):
15229
15230         msg = []
15231         msg.append(("emerge: There are no sets to satisfy '%s'. " + \
15232                 "The following sets exist:") % \
15233                 colorize("INFORM", set_name))
15234         msg.append("")
15235
15236         for s in sorted(root_config.sets):
15237                 msg.append("    %s" % s)
15238         msg.append("")
15239
15240         writemsg_level("".join("%s\n" % l for l in msg),
15241                 level=logging.ERROR, noiselevel=-1)
15242
15243 def expand_set_arguments(myfiles, myaction, root_config):
15244         retval = os.EX_OK
15245         setconfig = root_config.setconfig
15246
15247         sets = setconfig.getSets()
15248
15249         # In order to know exactly which atoms/sets should be added to the
15250         # world file, the depgraph performs set expansion later. It will get
15251         # confused about where the atoms came from if it's not allowed to
15252         # expand them itself.
15253         do_not_expand = (None, )
15254         newargs = []
15255         for a in myfiles:
15256                 if a in ("system", "world"):
15257                         newargs.append(SETPREFIX+a)
15258                 else:
15259                         newargs.append(a)
15260         myfiles = newargs
15261         del newargs
15262         newargs = []
15263
15264         # separators for set arguments
15265         ARG_START = "{"
15266         ARG_END = "}"
15267
15268         # WARNING: all operators must be of equal length
15269         IS_OPERATOR = "/@"
15270         DIFF_OPERATOR = "-@"
15271         UNION_OPERATOR = "+@"
15272         
15273         for i in range(0, len(myfiles)):
15274                 if myfiles[i].startswith(SETPREFIX):
15275                         start = 0
15276                         end = 0
15277                         x = myfiles[i][len(SETPREFIX):]
15278                         newset = ""
15279                         while x:
15280                                 start = x.find(ARG_START)
15281                                 end = x.find(ARG_END)
15282                                 if start > 0 and start < end:
15283                                         namepart = x[:start]
15284                                         argpart = x[start+1:end]
15285                                 
15286                                         # TODO: implement proper quoting
15287                                         args = argpart.split(",")
15288                                         options = {}
15289                                         for a in args:
15290                                                 if "=" in a:
15291                                                         k, v  = a.split("=", 1)
15292                                                         options[k] = v
15293                                                 else:
15294                                                         options[a] = "True"
15295                                         setconfig.update(namepart, options)
15296                                         newset += (x[:start-len(namepart)]+namepart)
15297                                         x = x[end+len(ARG_END):]
15298                                 else:
15299                                         newset += x
15300                                         x = ""
15301                         myfiles[i] = SETPREFIX+newset
15302                                 
15303         sets = setconfig.getSets()
15304
15305         # display errors that occured while loading the SetConfig instance
15306         for e in setconfig.errors:
15307                 print colorize("BAD", "Error during set creation: %s" % e)
15308         
15309         # emerge relies on the existance of sets with names "world" and "system"
15310         required_sets = ("world", "system")
15311         missing_sets = []
15312
15313         for s in required_sets:
15314                 if s not in sets:
15315                         missing_sets.append(s)
15316         if missing_sets:
15317                 if len(missing_sets) > 2:
15318                         missing_sets_str = ", ".join('"%s"' % s for s in missing_sets[:-1])
15319                         missing_sets_str += ', and "%s"' % missing_sets[-1]
15320                 elif len(missing_sets) == 2:
15321                         missing_sets_str = '"%s" and "%s"' % tuple(missing_sets)
15322                 else:
15323                         missing_sets_str = '"%s"' % missing_sets[-1]
15324                 msg = ["emerge: incomplete set configuration, " + \
15325                         "missing set(s): %s" % missing_sets_str]
15326                 if sets:
15327                         msg.append("        sets defined: %s" % ", ".join(sets))
15328                 msg.append("        This usually means that '%s'" % \
15329                         (os.path.join(portage.const.GLOBAL_CONFIG_PATH, "sets.conf"),))
15330                 msg.append("        is missing or corrupt.")
15331                 for line in msg:
15332                         writemsg_level(line + "\n", level=logging.ERROR, noiselevel=-1)
15333                 return (None, 1)
15334         unmerge_actions = ("unmerge", "prune", "clean", "depclean")
15335
15336         for a in myfiles:
15337                 if a.startswith(SETPREFIX):
15338                         # support simple set operations (intersection, difference and union)
15339                         # on the commandline. Expressions are evaluated strictly left-to-right
15340                         if IS_OPERATOR in a or DIFF_OPERATOR in a or UNION_OPERATOR in a:
15341                                 expression = a[len(SETPREFIX):]
15342                                 expr_sets = []
15343                                 expr_ops = []
15344                                 while IS_OPERATOR in expression or DIFF_OPERATOR in expression or UNION_OPERATOR in expression:
15345                                         is_pos = expression.rfind(IS_OPERATOR)
15346                                         diff_pos = expression.rfind(DIFF_OPERATOR)
15347                                         union_pos = expression.rfind(UNION_OPERATOR)
15348                                         op_pos = max(is_pos, diff_pos, union_pos)
15349                                         s1 = expression[:op_pos]
15350                                         s2 = expression[op_pos+len(IS_OPERATOR):]
15351                                         op = expression[op_pos:op_pos+len(IS_OPERATOR)]
15352                                         if not s2 in sets:
15353                                                 display_missing_pkg_set(root_config, s2)
15354                                                 return (None, 1)
15355                                         expr_sets.insert(0, s2)
15356                                         expr_ops.insert(0, op)
15357                                         expression = s1
15358                                 if not expression in sets:
15359                                         display_missing_pkg_set(root_config, expression)
15360                                         return (None, 1)
15361                                 expr_sets.insert(0, expression)
15362                                 result = set(setconfig.getSetAtoms(expression))
15363                                 for i in range(0, len(expr_ops)):
15364                                         s2 = setconfig.getSetAtoms(expr_sets[i+1])
15365                                         if expr_ops[i] == IS_OPERATOR:
15366                                                 result.intersection_update(s2)
15367                                         elif expr_ops[i] == DIFF_OPERATOR:
15368                                                 result.difference_update(s2)
15369                                         elif expr_ops[i] == UNION_OPERATOR:
15370                                                 result.update(s2)
15371                                         else:
15372                                                 raise NotImplementedError("unknown set operator %s" % expr_ops[i])
15373                                 newargs.extend(result)
15374                         else:                   
15375                                 s = a[len(SETPREFIX):]
15376                                 if s not in sets:
15377                                         display_missing_pkg_set(root_config, s)
15378                                         return (None, 1)
15379                                 setconfig.active.append(s)
15380                                 try:
15381                                         set_atoms = setconfig.getSetAtoms(s)
15382                                 except portage.exception.PackageSetNotFound, e:
15383                                         writemsg_level(("emerge: the given set '%s' " + \
15384                                                 "contains a non-existent set named '%s'.\n") % \
15385                                                 (s, e), level=logging.ERROR, noiselevel=-1)
15386                                         return (None, 1)
15387                                 if myaction in unmerge_actions and \
15388                                                 not sets[s].supportsOperation("unmerge"):
15389                                         sys.stderr.write("emerge: the given set '%s' does " % s + \
15390                                                 "not support unmerge operations\n")
15391                                         retval = 1
15392                                 elif not set_atoms:
15393                                         print "emerge: '%s' is an empty set" % s
15394                                 elif myaction not in do_not_expand:
15395                                         newargs.extend(set_atoms)
15396                                 else:
15397                                         newargs.append(SETPREFIX+s)
15398                                 for e in sets[s].errors:
15399                                         print e
15400                 else:
15401                         newargs.append(a)
15402         return (newargs, retval)
15403
15404 def repo_name_check(trees):
15405         missing_repo_names = set()
15406         for root, root_trees in trees.iteritems():
15407                 if "porttree" in root_trees:
15408                         portdb = root_trees["porttree"].dbapi
15409                         missing_repo_names.update(portdb.porttrees)
15410                         repos = portdb.getRepositories()
15411                         for r in repos:
15412                                 missing_repo_names.discard(portdb.getRepositoryPath(r))
15413                         if portdb.porttree_root in missing_repo_names and \
15414                                 not os.path.exists(os.path.join(
15415                                 portdb.porttree_root, "profiles")):
15416                                 # This is normal if $PORTDIR happens to be empty,
15417                                 # so don't warn about it.
15418                                 missing_repo_names.remove(portdb.porttree_root)
15419
15420         if missing_repo_names:
15421                 msg = []
15422                 msg.append("WARNING: One or more repositories " + \
15423                         "have missing repo_name entries:")
15424                 msg.append("")
15425                 for p in missing_repo_names:
15426                         msg.append("\t%s/profiles/repo_name" % (p,))
15427                 msg.append("")
15428                 msg.extend(textwrap.wrap("NOTE: Each repo_name entry " + \
15429                         "should be a plain text file containing a unique " + \
15430                         "name for the repository on the first line.", 70))
15431                 writemsg_level("".join("%s\n" % l for l in msg),
15432                         level=logging.WARNING, noiselevel=-1)
15433
15434         return bool(missing_repo_names)
15435
15436 def config_protect_check(trees):
15437         for root, root_trees in trees.iteritems():
15438                 if not root_trees["root_config"].settings.get("CONFIG_PROTECT"):
15439                         msg = "!!! CONFIG_PROTECT is empty"
15440                         if root != "/":
15441                                 msg += " for '%s'" % root
15442                         writemsg_level(msg, level=logging.WARN, noiselevel=-1)
15443
15444 def ambiguous_package_name(arg, atoms, root_config, spinner, myopts):
15445
15446         if "--quiet" in myopts:
15447                 print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
15448                 print "!!! one of the following fully-qualified ebuild names instead:\n"
15449                 for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
15450                         print "    " + colorize("INFORM", cp)
15451                 return
15452
15453         s = search(root_config, spinner, "--searchdesc" in myopts,
15454                 "--quiet" not in myopts, "--usepkg" in myopts,
15455                 "--usepkgonly" in myopts)
15456         null_cp = portage.dep_getkey(insert_category_into_atom(
15457                 arg, "null"))
15458         cat, atom_pn = portage.catsplit(null_cp)
15459         s.searchkey = atom_pn
15460         for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
15461                 s.addCP(cp)
15462         s.output()
15463         print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
15464         print "!!! one of the above fully-qualified ebuild names instead.\n"
15465
15466 def profile_check(trees, myaction, myopts):
15467         if myaction in ("info", "sync"):
15468                 return os.EX_OK
15469         elif "--version" in myopts or "--help" in myopts:
15470                 return os.EX_OK
15471         for root, root_trees in trees.iteritems():
15472                 if root_trees["root_config"].settings.profiles:
15473                         continue
15474                 # generate some profile related warning messages
15475                 validate_ebuild_environment(trees)
15476                 msg = "If you have just changed your profile configuration, you " + \
15477                         "should revert back to the previous configuration. Due to " + \
15478                         "your current profile being invalid, allowed actions are " + \
15479                         "limited to --help, --info, --sync, and --version."
15480                 writemsg_level("".join("!!! %s\n" % l for l in textwrap.wrap(msg, 70)),
15481                         level=logging.ERROR, noiselevel=-1)
15482                 return 1
15483         return os.EX_OK
15484
15485 def emerge_main():
15486         global portage  # NFC why this is necessary now - genone
15487         portage._disable_legacy_globals()
15488         # Disable color until we're sure that it should be enabled (after
15489         # EMERGE_DEFAULT_OPTS has been parsed).
15490         portage.output.havecolor = 0
15491         # This first pass is just for options that need to be known as early as
15492         # possible, such as --config-root.  They will be parsed again later,
15493         # together with EMERGE_DEFAULT_OPTS (which may vary depending on the
15494         # the value of --config-root).
15495         myaction, myopts, myfiles = parse_opts(sys.argv[1:], silent=True)
15496         if "--debug" in myopts:
15497                 os.environ["PORTAGE_DEBUG"] = "1"
15498         if "--config-root" in myopts:
15499                 os.environ["PORTAGE_CONFIGROOT"] = myopts["--config-root"]
15500         if "--root" in myopts:
15501                 os.environ["ROOT"] = myopts["--root"]
15502
15503         # Portage needs to ensure a sane umask for the files it creates.
15504         os.umask(022)
15505         settings, trees, mtimedb = load_emerge_config()
15506         portdb = trees[settings["ROOT"]]["porttree"].dbapi
15507         rval = profile_check(trees, myaction, myopts)
15508         if rval != os.EX_OK:
15509                 return rval
15510
15511         if portage._global_updates(trees, mtimedb["updates"]):
15512                 mtimedb.commit()
15513                 # Reload the whole config from scratch.
15514                 settings, trees, mtimedb = load_emerge_config(trees=trees)
15515                 portdb = trees[settings["ROOT"]]["porttree"].dbapi
15516
15517         xterm_titles = "notitles" not in settings.features
15518
15519         tmpcmdline = []
15520         if "--ignore-default-opts" not in myopts:
15521                 tmpcmdline.extend(settings["EMERGE_DEFAULT_OPTS"].split())
15522         tmpcmdline.extend(sys.argv[1:])
15523         myaction, myopts, myfiles = parse_opts(tmpcmdline)
15524
15525         if "--digest" in myopts:
15526                 os.environ["FEATURES"] = os.environ.get("FEATURES","") + " digest"
15527                 # Reload the whole config from scratch so that the portdbapi internal
15528                 # config is updated with new FEATURES.
15529                 settings, trees, mtimedb = load_emerge_config(trees=trees)
15530                 portdb = trees[settings["ROOT"]]["porttree"].dbapi
15531
15532         for myroot in trees:
15533                 mysettings =  trees[myroot]["vartree"].settings
15534                 mysettings.unlock()
15535                 adjust_config(myopts, mysettings)
15536                 if '--pretend' not in myopts and myaction in \
15537                         (None, 'clean', 'depclean', 'prune', 'unmerge'):
15538                         mysettings["PORTAGE_COUNTER_HASH"] = \
15539                                 trees[myroot]["vartree"].dbapi._counter_hash()
15540                         mysettings.backup_changes("PORTAGE_COUNTER_HASH")
15541                 mysettings.lock()
15542                 del myroot, mysettings
15543
15544         apply_priorities(settings)
15545
15546         spinner = stdout_spinner()
15547         if "candy" in settings.features:
15548                 spinner.update = spinner.update_scroll
15549
15550         if "--quiet" not in myopts:
15551                 portage.deprecated_profile_check(settings=settings)
15552                 repo_name_check(trees)
15553                 config_protect_check(trees)
15554
15555         for mytrees in trees.itervalues():
15556                 mydb = mytrees["porttree"].dbapi
15557                 # Freeze the portdbapi for performance (memoize all xmatch results).
15558                 mydb.freeze()
15559         del mytrees, mydb
15560
15561         if "moo" in myfiles:
15562                 print """
15563
15564   Larry loves Gentoo (""" + platform.system() + """)
15565
15566  _______________________
15567 < Have you mooed today? >
15568  -----------------------
15569         \   ^__^
15570          \  (oo)\_______
15571             (__)\       )\/\ 
15572                 ||----w |
15573                 ||     ||
15574
15575 """
15576
15577         for x in myfiles:
15578                 ext = os.path.splitext(x)[1]
15579                 if (ext == ".ebuild" or ext == ".tbz2") and os.path.exists(os.path.abspath(x)):
15580                         print colorize("BAD", "\n*** emerging by path is broken and may not always work!!!\n")
15581                         break
15582
15583         root_config = trees[settings["ROOT"]]["root_config"]
15584         if myaction == "list-sets":
15585                 sys.stdout.write("".join("%s\n" % s for s in sorted(root_config.sets)))
15586                 sys.stdout.flush()
15587                 return os.EX_OK
15588
15589         # only expand sets for actions taking package arguments
15590         oldargs = myfiles[:]
15591         if myaction in ("clean", "config", "depclean", "info", "prune", "unmerge", None):
15592                 myfiles, retval = expand_set_arguments(myfiles, myaction, root_config)
15593                 if retval != os.EX_OK:
15594                         return retval
15595
15596                 # Need to handle empty sets specially, otherwise emerge will react 
15597                 # with the help message for empty argument lists
15598                 if oldargs and not myfiles:
15599                         print "emerge: no targets left after set expansion"
15600                         return 0
15601
15602         if ("--tree" in myopts) and ("--columns" in myopts):
15603                 print "emerge: can't specify both of \"--tree\" and \"--columns\"."
15604                 return 1
15605
15606         if ("--quiet" in myopts):
15607                 spinner.update = spinner.update_quiet
15608                 portage.util.noiselimit = -1
15609
15610         # Always create packages if FEATURES=buildpkg
15611         # Imply --buildpkg if --buildpkgonly
15612         if ("buildpkg" in settings.features) or ("--buildpkgonly" in myopts):
15613                 if "--buildpkg" not in myopts:
15614                         myopts["--buildpkg"] = True
15615
15616         # Always try and fetch binary packages if FEATURES=getbinpkg
15617         if ("getbinpkg" in settings.features):
15618                 myopts["--getbinpkg"] = True
15619
15620         if "--buildpkgonly" in myopts:
15621                 # --buildpkgonly will not merge anything, so
15622                 # it cancels all binary package options.
15623                 for opt in ("--getbinpkg", "--getbinpkgonly",
15624                         "--usepkg", "--usepkgonly"):
15625                         myopts.pop(opt, None)
15626
15627         if "--fetch-all-uri" in myopts:
15628                 myopts["--fetchonly"] = True
15629
15630         if "--skipfirst" in myopts and "--resume" not in myopts:
15631                 myopts["--resume"] = True
15632
15633         if ("--getbinpkgonly" in myopts) and not ("--usepkgonly" in myopts):
15634                 myopts["--usepkgonly"] = True
15635
15636         if ("--getbinpkgonly" in myopts) and not ("--getbinpkg" in myopts):
15637                 myopts["--getbinpkg"] = True
15638
15639         if ("--getbinpkg" in myopts) and not ("--usepkg" in myopts):
15640                 myopts["--usepkg"] = True
15641
15642         # Also allow -K to apply --usepkg/-k
15643         if ("--usepkgonly" in myopts) and not ("--usepkg" in myopts):
15644                 myopts["--usepkg"] = True
15645
15646         # Allow -p to remove --ask
15647         if ("--pretend" in myopts) and ("--ask" in myopts):
15648                 print ">>> --pretend disables --ask... removing --ask from options."
15649                 del myopts["--ask"]
15650
15651         # forbid --ask when not in a terminal
15652         # note: this breaks `emerge --ask | tee logfile`, but that doesn't work anyway.
15653         if ("--ask" in myopts) and (not sys.stdin.isatty()):
15654                 portage.writemsg("!!! \"--ask\" should only be used in a terminal. Exiting.\n",
15655                         noiselevel=-1)
15656                 return 1
15657
15658         if settings.get("PORTAGE_DEBUG", "") == "1":
15659                 spinner.update = spinner.update_quiet
15660                 portage.debug=1
15661                 if "python-trace" in settings.features:
15662                         import portage.debug
15663                         portage.debug.set_trace(True)
15664
15665         if not ("--quiet" in myopts):
15666                 if not sys.stdout.isatty() or ("--nospinner" in myopts):
15667                         spinner.update = spinner.update_basic
15668
15669         if myaction == 'version':
15670                 print getportageversion(settings["PORTDIR"], settings["ROOT"],
15671                         settings.profile_path, settings["CHOST"],
15672                         trees[settings["ROOT"]]["vartree"].dbapi)
15673                 return 0
15674         elif "--help" in myopts:
15675                 _emerge.help.help(myaction, myopts, portage.output.havecolor)
15676                 return 0
15677
15678         if "--debug" in myopts:
15679                 print "myaction", myaction
15680                 print "myopts", myopts
15681
15682         if not myaction and not myfiles and "--resume" not in myopts:
15683                 _emerge.help.help(myaction, myopts, portage.output.havecolor)
15684                 return 1
15685
15686         pretend = "--pretend" in myopts
15687         fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
15688         buildpkgonly = "--buildpkgonly" in myopts
15689
15690         # check if root user is the current user for the actions where emerge needs this
15691         if portage.secpass < 2:
15692                 # We've already allowed "--version" and "--help" above.
15693                 if "--pretend" not in myopts and myaction not in ("search","info"):
15694                         need_superuser = not \
15695                                 (fetchonly or \
15696                                 (buildpkgonly and secpass >= 1) or \
15697                                 myaction in ("metadata", "regen") or \
15698                                 (myaction == "sync" and os.access(settings["PORTDIR"], os.W_OK)))
15699                         if portage.secpass < 1 or \
15700                                 need_superuser:
15701                                 if need_superuser:
15702                                         access_desc = "superuser"
15703                                 else:
15704                                         access_desc = "portage group"
15705                                 # Always show portage_group_warning() when only portage group
15706                                 # access is required but the user is not in the portage group.
15707                                 from portage.data import portage_group_warning
15708                                 if "--ask" in myopts:
15709                                         myopts["--pretend"] = True
15710                                         del myopts["--ask"]
15711                                         print ("%s access is required... " + \
15712                                                 "adding --pretend to options.\n") % access_desc
15713                                         if portage.secpass < 1 and not need_superuser:
15714                                                 portage_group_warning()
15715                                 else:
15716                                         sys.stderr.write(("emerge: %s access is " + \
15717                                                 "required.\n\n") % access_desc)
15718                                         if portage.secpass < 1 and not need_superuser:
15719                                                 portage_group_warning()
15720                                         return 1
15721
15722         disable_emergelog = False
15723         for x in ("--pretend", "--fetchonly", "--fetch-all-uri"):
15724                 if x in myopts:
15725                         disable_emergelog = True
15726                         break
15727         if myaction in ("search", "info"):
15728                 disable_emergelog = True
15729         if disable_emergelog:
15730                 """ Disable emergelog for everything except build or unmerge
15731                 operations.  This helps minimize parallel emerge.log entries that can
15732                 confuse log parsers.  We especially want it disabled during
15733                 parallel-fetch, which uses --resume --fetchonly."""
15734                 global emergelog
15735                 def emergelog(*pargs, **kargs):
15736                         pass
15737
15738         if not "--pretend" in myopts:
15739                 emergelog(xterm_titles, "Started emerge on: "+\
15740                         time.strftime("%b %d, %Y %H:%M:%S", time.localtime()))
15741                 myelogstr=""
15742                 if myopts:
15743                         myelogstr=" ".join(myopts)
15744                 if myaction:
15745                         myelogstr+=" "+myaction
15746                 if myfiles:
15747                         myelogstr += " " + " ".join(oldargs)
15748                 emergelog(xterm_titles, " *** emerge " + myelogstr)
15749         del oldargs
15750
15751         def emergeexitsig(signum, frame):
15752                 signal.signal(signal.SIGINT, signal.SIG_IGN)
15753                 signal.signal(signal.SIGTERM, signal.SIG_IGN)
15754                 portage.util.writemsg("\n\nExiting on signal %(signal)s\n" % {"signal":signum})
15755                 sys.exit(100+signum)
15756         signal.signal(signal.SIGINT, emergeexitsig)
15757         signal.signal(signal.SIGTERM, emergeexitsig)
15758
15759         def emergeexit():
15760                 """This gets out final log message in before we quit."""
15761                 if "--pretend" not in myopts:
15762                         emergelog(xterm_titles, " *** terminating.")
15763                 if "notitles" not in settings.features:
15764                         xtermTitleReset()
15765         portage.atexit_register(emergeexit)
15766
15767         if myaction in ("config", "metadata", "regen", "sync"):
15768                 if "--pretend" in myopts:
15769                         sys.stderr.write(("emerge: The '%s' action does " + \
15770                                 "not support '--pretend'.\n") % myaction)
15771                         return 1
15772
15773         if "sync" == myaction:
15774                 return action_sync(settings, trees, mtimedb, myopts, myaction)
15775         elif "metadata" == myaction:
15776                 action_metadata(settings, portdb, myopts)
15777         elif myaction=="regen":
15778                 validate_ebuild_environment(trees)
15779                 return action_regen(settings, portdb, myopts.get("--jobs"),
15780                         myopts.get("--load-average"))
15781         # HELP action
15782         elif "config"==myaction:
15783                 validate_ebuild_environment(trees)
15784                 action_config(settings, trees, myopts, myfiles)
15785
15786         # SEARCH action
15787         elif "search"==myaction:
15788                 validate_ebuild_environment(trees)
15789                 action_search(trees[settings["ROOT"]]["root_config"],
15790                         myopts, myfiles, spinner)
15791         elif myaction in ("clean", "unmerge") or \
15792                 (myaction == "prune" and "--nodeps" in myopts):
15793                 validate_ebuild_environment(trees)
15794
15795                 # Ensure atoms are valid before calling unmerge().
15796                 # For backward compat, leading '=' is not required.
15797                 for x in myfiles:
15798                         if is_valid_package_atom(x) or \
15799                                 is_valid_package_atom("=" + x):
15800                                 continue
15801                         msg = []
15802                         msg.append("'%s' is not a valid package atom." % (x,))
15803                         msg.append("Please check ebuild(5) for full details.")
15804                         writemsg_level("".join("!!! %s\n" % line for line in msg),
15805                                 level=logging.ERROR, noiselevel=-1)
15806                         return 1
15807
15808                 # When given a list of atoms, unmerge
15809                 # them in the order given.
15810                 ordered = myaction == "unmerge"
15811                 if 1 == unmerge(root_config, myopts, myaction, myfiles,
15812                         mtimedb["ldpath"], ordered=ordered):
15813                         if not (buildpkgonly or fetchonly or pretend):
15814                                 post_emerge(root_config, myopts, mtimedb, os.EX_OK)
15815
15816         elif myaction in ("depclean", "info", "prune"):
15817
15818                 # Ensure atoms are valid before calling unmerge().
15819                 vardb = trees[settings["ROOT"]]["vartree"].dbapi
15820                 valid_atoms = []
15821                 for x in myfiles:
15822                         if is_valid_package_atom(x):
15823                                 try:
15824                                         valid_atoms.append(
15825                                                 portage.dep_expand(x, mydb=vardb, settings=settings))
15826                                 except portage.exception.AmbiguousPackageName, e:
15827                                         msg = "The short ebuild name \"" + x + \
15828                                                 "\" is ambiguous.  Please specify " + \
15829                                                 "one of the following " + \
15830                                                 "fully-qualified ebuild names instead:"
15831                                         for line in textwrap.wrap(msg, 70):
15832                                                 writemsg_level("!!! %s\n" % (line,),
15833                                                         level=logging.ERROR, noiselevel=-1)
15834                                         for i in e[0]:
15835                                                 writemsg_level("    %s\n" % colorize("INFORM", i),
15836                                                         level=logging.ERROR, noiselevel=-1)
15837                                         writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
15838                                         return 1
15839                                 continue
15840                         msg = []
15841                         msg.append("'%s' is not a valid package atom." % (x,))
15842                         msg.append("Please check ebuild(5) for full details.")
15843                         writemsg_level("".join("!!! %s\n" % line for line in msg),
15844                                 level=logging.ERROR, noiselevel=-1)
15845                         return 1
15846
15847                 if myaction == "info":
15848                         return action_info(settings, trees, myopts, valid_atoms)
15849
15850                 validate_ebuild_environment(trees)
15851                 action_depclean(settings, trees, mtimedb["ldpath"],
15852                         myopts, myaction, valid_atoms, spinner)
15853                 if not (buildpkgonly or fetchonly or pretend):
15854                         post_emerge(root_config, myopts, mtimedb, os.EX_OK)
15855         # "update", "system", or just process files:
15856         else:
15857                 validate_ebuild_environment(trees)
15858
15859                 for x in myfiles:
15860                         if x.startswith(SETPREFIX) or \
15861                                 is_valid_package_atom(x):
15862                                 continue
15863                         if x[:1] == os.sep:
15864                                 continue
15865                         try:
15866                                 os.lstat(x)
15867                                 continue
15868                         except OSError:
15869                                 pass
15870                         msg = []
15871                         msg.append("'%s' is not a valid package atom." % (x,))
15872                         msg.append("Please check ebuild(5) for full details.")
15873                         writemsg_level("".join("!!! %s\n" % line for line in msg),
15874                                 level=logging.ERROR, noiselevel=-1)
15875                         return 1
15876
15877                 if "--pretend" not in myopts:
15878                         display_news_notification(root_config, myopts)
15879                 retval = action_build(settings, trees, mtimedb,
15880                         myopts, myaction, myfiles, spinner)
15881                 root_config = trees[settings["ROOT"]]["root_config"]
15882                 post_emerge(root_config, myopts, mtimedb, retval)
15883
15884                 return retval