When in --nodeps mode, make depgraph._serialize_tasks() preserve the package
[portage.git] / pym / _emerge / __init__.py
1 #!/usr/bin/python -O
2 # Copyright 1999-2006 Gentoo Foundation
3 # Distributed under the terms of the GNU General Public License v2
4 # $Id$
5
6 import array
7 import codecs
8 from collections import deque
9 import fcntl
10 import formatter
11 import logging
12 import pwd
13 import select
14 import shlex
15 import shutil
16 import signal
17 import sys
18 import textwrap
19 import urlparse
20 import weakref
21 import gc
22 import os, stat
23 import platform
24
25 try:
26         import portage
27 except ImportError:
28         from os import path as osp
29         sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
30         import portage
31
32 from portage import digraph
33 from portage.const import NEWS_LIB_PATH
34
35 import _emerge.help
36 import portage.xpak, commands, errno, re, socket, time
37 from portage.output import blue, bold, colorize, darkblue, darkgreen, darkred, green, \
38         nc_len, red, teal, turquoise, xtermTitle, \
39         xtermTitleReset, yellow
40 from portage.output import create_color_func
41 good = create_color_func("GOOD")
42 bad = create_color_func("BAD")
43 # white looks bad on terminals with white background
44 from portage.output import bold as white
45
46 import portage.elog
47 import portage.dep
48 portage.dep._dep_check_strict = True
49 import portage.util
50 import portage.locks
51 import portage.exception
52 from portage.cache.cache_errors import CacheError
53 from portage.data import secpass
54 from portage.elog.messages import eerror
55 from portage.util import normalize_path as normpath
56 from portage.util import cmp_sort_key, writemsg, writemsg_level
57 from portage.sets import load_default_config, SETPREFIX
58 from portage.sets.base import InternalPackageSet
59
60 from itertools import chain, izip
61
62 try:
63         import cPickle as pickle
64 except ImportError:
65         import pickle
66
67 try:
68         from cStringIO import StringIO
69 except ImportError:
70         from StringIO import StringIO
71
72 class stdout_spinner(object):
73         scroll_msgs = [
74                 "Gentoo Rocks ("+platform.system()+")",
75                 "Thank you for using Gentoo. :)",
76                 "Are you actually trying to read this?",
77                 "How many times have you stared at this?",
78                 "We are generating the cache right now",
79                 "You are paying too much attention.",
80                 "A theory is better than its explanation.",
81                 "Phasers locked on target, Captain.",
82                 "Thrashing is just virtual crashing.",
83                 "To be is to program.",
84                 "Real Users hate Real Programmers.",
85                 "When all else fails, read the instructions.",
86                 "Functionality breeds Contempt.",
87                 "The future lies ahead.",
88                 "3.1415926535897932384626433832795028841971694",
89                 "Sometimes insanity is the only alternative.",
90                 "Inaccuracy saves a world of explanation.",
91         ]
92
93         twirl_sequence = "/-\\|/-\\|/-\\|/-\\|\\-/|\\-/|\\-/|\\-/|"
94
95         def __init__(self):
96                 self.spinpos = 0
97                 self.update = self.update_twirl
98                 self.scroll_sequence = self.scroll_msgs[
99                         int(time.time() * 100) % len(self.scroll_msgs)]
100                 self.last_update = 0
101                 self.min_display_latency = 0.05
102
103         def _return_early(self):
104                 """
105                 Flushing output to the tty too frequently wastes cpu time. Therefore,
106                 each update* method should return without doing any output when this
107                 method returns True.
108                 """
109                 cur_time = time.time()
110                 if cur_time - self.last_update < self.min_display_latency:
111                         return True
112                 self.last_update = cur_time
113                 return False
114
115         def update_basic(self):
116                 self.spinpos = (self.spinpos + 1) % 500
117                 if self._return_early():
118                         return
119                 if (self.spinpos % 100) == 0:
120                         if self.spinpos == 0:
121                                 sys.stdout.write(". ")
122                         else:
123                                 sys.stdout.write(".")
124                 sys.stdout.flush()
125
126         def update_scroll(self):
127                 if self._return_early():
128                         return
129                 if(self.spinpos >= len(self.scroll_sequence)):
130                         sys.stdout.write(darkgreen(" \b\b\b" + self.scroll_sequence[
131                                 len(self.scroll_sequence) - 1 - (self.spinpos % len(self.scroll_sequence))]))
132                 else:
133                         sys.stdout.write(green("\b " + self.scroll_sequence[self.spinpos]))
134                 sys.stdout.flush()
135                 self.spinpos = (self.spinpos + 1) % (2 * len(self.scroll_sequence))
136
137         def update_twirl(self):
138                 self.spinpos = (self.spinpos + 1) % len(self.twirl_sequence)
139                 if self._return_early():
140                         return
141                 sys.stdout.write("\b\b " + self.twirl_sequence[self.spinpos])
142                 sys.stdout.flush()
143
144         def update_quiet(self):
145                 return
146
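# Illustrative driving loop for the spinner above (a sketch; the caller and
# the option name are hypothetical, not part of this module's API). The
# update* methods may be called as often as the caller likes, since
# _return_early() throttles tty writes to one per min_display_latency:
#
#     spinner = stdout_spinner()
#     if "--quiet" in myopts:
#             spinner.update = spinner.update_quiet
#     for cp in portdb.cp_all():
#             spinner.update()
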
147 def userquery(prompt, responses=None, colours=None):
148         """Displays a prompt and a set of responses, then waits for user input.
149         The input is checked against the responses and the first match is
150         returned.  An empty response will match the first value in responses.  The
151         input buffer is *not* cleared prior to the prompt!
152
153         prompt: a String.
154         responses: a List of Strings.
155         colours: a List of Functions taking and returning a String, used to
156         process the responses for display. Typically these will be functions
157         like red() but could be e.g. lambda x: "DisplayString".
158         If responses is omitted, defaults to ["Yes", "No"], [green, red].
159         If only colours is omitted, defaults to [bold, ...].
160
161         Returns a member of the List responses. (If called without optional
162         arguments, returns "Yes" or "No".)
163         KeyboardInterrupt is converted to SystemExit to avoid tracebacks being
164         printed."""
165         if responses is None:
166                 responses = ["Yes", "No"]
167                 colours = [
168                         create_color_func("PROMPT_CHOICE_DEFAULT"),
169                         create_color_func("PROMPT_CHOICE_OTHER")
170                 ]
171         elif colours is None:
172                 colours=[bold]
173         colours=(colours*len(responses))[:len(responses)]
174         print bold(prompt),
175         try:
176                 while True:
177                         response=raw_input("["+"/".join([colours[i](responses[i]) for i in range(len(responses))])+"] ")
178                         for key in responses:
179                                 # An empty response will match the first value in responses.
180                                 if response.upper()==key[:len(response)].upper():
181                                         return key
182                         print "Sorry, response '%s' not understood." % response,
183         except (EOFError, KeyboardInterrupt):
184                 print "Interrupted."
185                 sys.exit(1)
186
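# Illustrative usage of userquery() above (a sketch based on its docstring;
# the prompt strings are hypothetical):
#
#     if userquery("Would you like to continue?") == "No":
#             sys.exit(1)
#
#     # Custom responses and colours; input is matched case-insensitively
#     # by prefix, and an empty response selects the first entry:
#     choice = userquery("Delete these files?",
#             responses=["Always", "Never"], colours=[green, red])
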
187 actions = frozenset([
188 "clean", "config", "depclean",
189 "info", "list-sets", "metadata",
190 "prune", "regen",  "search",
191 "sync",  "unmerge", "version",
192 ])
193 options=[
194 "--ask",          "--alphabetical",
195 "--buildpkg",     "--buildpkgonly",
196 "--changelog",    "--columns",
197 "--complete-graph",
198 "--debug",        "--deep",
199 "--digest",
200 "--emptytree",
201 "--fetchonly",    "--fetch-all-uri",
202 "--getbinpkg",    "--getbinpkgonly",
203 "--help",         "--ignore-default-opts",
204 "--keep-going",
205 "--noconfmem",
206 "--newuse",
207 "--nodeps",       "--noreplace",
208 "--nospinner",    "--oneshot",
209 "--onlydeps",     "--pretend",
210 "--quiet",        "--resume",
211 "--searchdesc",   "--selective",
212 "--skipfirst",
213 "--tree",
214 "--update",
215 "--usepkg",       "--usepkgonly",
216 "--verbose",
217 ]
218
219 shortmapping={
220 "1":"--oneshot",
221 "a":"--ask",
222 "b":"--buildpkg",  "B":"--buildpkgonly",
223 "c":"--clean",     "C":"--unmerge",
224 "d":"--debug",     "D":"--deep",
225 "e":"--emptytree",
226 "f":"--fetchonly", "F":"--fetch-all-uri",
227 "g":"--getbinpkg", "G":"--getbinpkgonly",
228 "h":"--help",
229 "k":"--usepkg",    "K":"--usepkgonly",
230 "l":"--changelog",
231 "n":"--noreplace", "N":"--newuse",
232 "o":"--onlydeps",  "O":"--nodeps",
233 "p":"--pretend",   "P":"--prune",
234 "q":"--quiet",
235 "s":"--search",    "S":"--searchdesc",
236 "t":"--tree",
237 "u":"--update",
238 "v":"--verbose",   "V":"--version"
239 }
240
241 def emergelog(xterm_titles, mystr, short_msg=None):
242         if xterm_titles and short_msg:
243                 if "HOSTNAME" in os.environ:
244                         short_msg = os.environ["HOSTNAME"]+": "+short_msg
245                 xtermTitle(short_msg)
246         try:
247                 file_path = "/var/log/emerge.log"
248                 mylogfile = open(file_path, "a")
249                 portage.util.apply_secpass_permissions(file_path,
250                         uid=portage.portage_uid, gid=portage.portage_gid,
251                         mode=0660)
252                 mylock = None
253                 try:
254                         mylock = portage.locks.lockfile(mylogfile)
255                         # seek because we may have gotten held up by the lock.
256                         # if so, we may not be positioned at the end of the file.
257                         mylogfile.seek(0, 2)
258                         mylogfile.write(str(time.time())[:10]+": "+mystr+"\n")
259                         mylogfile.flush()
260                 finally:
261                         if mylock:
262                                 portage.locks.unlockfile(mylock)
263                         mylogfile.close()
264         except (IOError,OSError,portage.exception.PortageException), e:
265                 if secpass >= 1:
266                         print >> sys.stderr, "emergelog():",e
267
268 def countdown(secs=5, doing="Starting"):
269         if secs:
270                 print ">>> Waiting",secs,"seconds before starting..."
271                 print ">>> (Control-C to abort)...\n"+doing+" in: ",
272                 ticks=range(secs)
273                 ticks.reverse()
274                 for sec in ticks:
275                         sys.stdout.write(colorize("UNMERGE_WARN", str(sec+1)+" "))
276                         sys.stdout.flush()
277                         time.sleep(1)
278                 print
279
280 # formats a size given in bytes nicely
281 def format_size(mysize):
282         if isinstance(mysize, basestring):
283                 return mysize
284         if 0 != mysize % 1024:
285                 # Always round up to the next kB so that it doesn't show 0 kB when
286                 # some small file still needs to be fetched.
287                 mysize += 1024 - mysize % 1024
288         mystr=str(mysize/1024)
289         mycount=len(mystr)
290         while (mycount > 3):
291                 mycount-=3
292                 mystr=mystr[:mycount]+","+mystr[mycount:]
293         return mystr+" kB"
294
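# Worked examples for format_size() above (illustrative): format_size(2500)
# rounds 2500 up to the next whole kB (3072 bytes) and returns "3 kB";
# format_size(2345678) rounds up to 2345984 bytes and returns "2,291 kB"
# after the thousands-separator loop.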
295
296 def getgccversion(chost):
297         """
298         rtype: C{str}
299         return:  the current in-use gcc version
300         """
301
302         gcc_ver_command = 'gcc -dumpversion'
303         gcc_ver_prefix = 'gcc-'
304
305         gcc_not_found_error = red(
306         "!!! No gcc found. You probably need to 'source /etc/profile'\n" +
307         "!!! to update the environment of this terminal and possibly\n" +
308         "!!! other terminals also.\n"
309         )
310
311         mystatus, myoutput = commands.getstatusoutput("gcc-config -c")
312         if mystatus == os.EX_OK and myoutput.startswith(chost + "-"):
313                 return myoutput.replace(chost + "-", gcc_ver_prefix, 1)
314
315         mystatus, myoutput = commands.getstatusoutput(
316                 chost + "-" + gcc_ver_command)
317         if mystatus == os.EX_OK:
318                 return gcc_ver_prefix + myoutput
319
320         mystatus, myoutput = commands.getstatusoutput(gcc_ver_command)
321         if mystatus == os.EX_OK:
322                 return gcc_ver_prefix + myoutput
323
324         portage.writemsg(gcc_not_found_error, noiselevel=-1)
325         return "[unavailable]"
326
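# Illustrative return values for getgccversion() above (a sketch, using a
# hypothetical CHOST): for chost="x86_64-pc-linux-gnu" this typically yields
# a string such as "gcc-4.3.2" (the gcc-config or gcc -dumpversion output
# with the CHOST prefix replaced by "gcc-"), or "[unavailable]" when neither
# gcc-config nor gcc can be queried.
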
327 def getportageversion(portdir, target_root, profile, chost, vardb):
328         profilever = "unavailable"
329         if profile:
330                 realpath = os.path.realpath(profile)
331                 basepath   = os.path.realpath(os.path.join(portdir, "profiles"))
332                 if realpath.startswith(basepath):
333                         profilever = realpath[1 + len(basepath):]
334                 else:
335                         try:
336                                 profilever = "!" + os.readlink(profile)
337                         except (OSError):
338                                 pass
339                 del realpath, basepath
340
341         libcver=[]
342         libclist  = vardb.match("virtual/libc")
343         libclist += vardb.match("virtual/glibc")
344         libclist  = portage.util.unique_array(libclist)
345         for x in libclist:
346                 xs=portage.catpkgsplit(x)
347                 if libcver:
348                         libcver+=","+"-".join(xs[1:])
349                 else:
350                         libcver="-".join(xs[1:])
351         if libcver==[]:
352                 libcver="unavailable"
353
354         gccver = getgccversion(chost)
355         unameout=platform.release()+" "+platform.machine()
356
357         return "Portage " + portage.VERSION +" ("+profilever+", "+gccver+", "+libcver+", "+unameout+")"
358
359 def create_depgraph_params(myopts, myaction):
360         #configure emerge engine parameters
361         #
362         # self:      include _this_ package regardless of whether it is merged.
363         # selective: exclude the package if it is merged
364         # recurse:   go into the dependencies
365         # deep:      go into the dependencies of already merged packages
366         # empty:     pretend nothing is merged
367         # complete:  completely account for all known dependencies
368         # remove:    build graph for use in removing packages
369         myparams = set(["recurse"])
370
371         if myaction == "remove":
372                 myparams.add("remove")
373                 myparams.add("complete")
374                 return myparams
375
376         if "--update" in myopts or \
377                 "--newuse" in myopts or \
378                 "--reinstall" in myopts or \
379                 "--noreplace" in myopts:
380                 myparams.add("selective")
381         if "--emptytree" in myopts:
382                 myparams.add("empty")
383                 myparams.discard("selective")
384         if "--nodeps" in myopts:
385                 myparams.discard("recurse")
386         if "--deep" in myopts:
387                 myparams.add("deep")
388         if "--complete-graph" in myopts:
389                 myparams.add("complete")
390         return myparams
391
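# Illustrative results for create_depgraph_params() above, assuming ordinary
# option dictionaries as emerge passes them:
#
#     create_depgraph_params({"--update": True, "--deep": True}, None)
#         -> set(["recurse", "selective", "deep"])
#     create_depgraph_params({"--nodeps": True}, None)
#         -> set([])            # "recurse" is discarded
#     create_depgraph_params({}, "remove")
#         -> set(["recurse", "remove", "complete"])
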
392 # search functionality
393 class search(object):
394
395         #
396         # class constants
397         #
398         VERSION_SHORT=1
399         VERSION_RELEASE=2
400
401         #
402         # public interface
403         #
404         def __init__(self, root_config, spinner, searchdesc,
405                 verbose, usepkg, usepkgonly):
406                 """Searches the available and installed packages for the supplied search key.
407                 The list of available and installed packages is created at object instantiation.
408                 This makes successive searches faster."""
409                 self.settings = root_config.settings
410                 self.vartree = root_config.trees["vartree"]
411                 self.spinner = spinner
412                 self.verbose = verbose
413                 self.searchdesc = searchdesc
414                 self.root_config = root_config
415                 self.setconfig = root_config.setconfig
416                 self.matches = {"pkg" : []}
417                 self.mlen = 0
418
419                 def fake_portdb():
420                         pass
421                 self.portdb = fake_portdb
422                 for attrib in ("aux_get", "cp_all",
423                         "xmatch", "findname", "getFetchMap"):
424                         setattr(fake_portdb, attrib, getattr(self, "_"+attrib))
425
426                 self._dbs = []
427
428                 portdb = root_config.trees["porttree"].dbapi
429                 bindb = root_config.trees["bintree"].dbapi
430                 vardb = root_config.trees["vartree"].dbapi
431
432                 if not usepkgonly and portdb._have_root_eclass_dir:
433                         self._dbs.append(portdb)
434
435                 if (usepkg or usepkgonly) and bindb.cp_all():
436                         self._dbs.append(bindb)
437
438                 self._dbs.append(vardb)
439                 self._portdb = portdb
440
441         def _cp_all(self):
442                 cp_all = set()
443                 for db in self._dbs:
444                         cp_all.update(db.cp_all())
445                 return list(sorted(cp_all))
446
447         def _aux_get(self, *args, **kwargs):
448                 for db in self._dbs:
449                         try:
450                                 return db.aux_get(*args, **kwargs)
451                         except KeyError:
452                                 pass
453                 raise
454
455         def _findname(self, *args, **kwargs):
456                 for db in self._dbs:
457                         if db is not self._portdb:
458                                 # We don't want findname to return anything
459                                 # unless it's an ebuild in a portage tree.
460                                 # Otherwise, it's already built and we don't
461                                 # care about it.
462                                 continue
463                         func = getattr(db, "findname", None)
464                         if func:
465                                 value = func(*args, **kwargs)
466                                 if value:
467                                         return value
468                 return None
469
470         def _getFetchMap(self, *args, **kwargs):
471                 for db in self._dbs:
472                         func = getattr(db, "getFetchMap", None)
473                         if func:
474                                 value = func(*args, **kwargs)
475                                 if value:
476                                         return value
477                 return {}
478
479         def _visible(self, db, cpv, metadata):
480                 installed = db is self.vartree.dbapi
481                 built = installed or db is not self._portdb
482                 pkg_type = "ebuild"
483                 if installed:
484                         pkg_type = "installed"
485                 elif built:
486                         pkg_type = "binary"
487                 return visible(self.settings,
488                         Package(type_name=pkg_type, root_config=self.root_config,
489                         cpv=cpv, built=built, installed=installed, metadata=metadata))
490
491         def _xmatch(self, level, atom):
492                 """
493                 This method does not expand old-style virtuals because it
494                 is restricted to returning matches for a single ${CATEGORY}/${PN}
495                 and old-style virtual matches are unreliable for that when querying
496                 multiple package databases. If necessary, old-style virtual expansion
497                 can be performed on atoms prior to calling this method.
498                 """
499                 cp = portage.dep_getkey(atom)
500                 if level == "match-all":
501                         matches = set()
502                         for db in self._dbs:
503                                 if hasattr(db, "xmatch"):
504                                         matches.update(db.xmatch(level, atom))
505                                 else:
506                                         matches.update(db.match(atom))
507                         result = list(x for x in matches if portage.cpv_getkey(x) == cp)
508                         db._cpv_sort_ascending(result)
509                 elif level == "match-visible":
510                         matches = set()
511                         for db in self._dbs:
512                                 if hasattr(db, "xmatch"):
513                                         matches.update(db.xmatch(level, atom))
514                                 else:
515                                         db_keys = list(db._aux_cache_keys)
516                                         for cpv in db.match(atom):
517                                                 metadata = izip(db_keys,
518                                                         db.aux_get(cpv, db_keys))
519                                                 if not self._visible(db, cpv, metadata):
520                                                         continue
521                                                 matches.add(cpv)
522                         result = list(x for x in matches if portage.cpv_getkey(x) == cp)
523                         db._cpv_sort_ascending(result)
524                 elif level == "bestmatch-visible":
525                         result = None
526                         for db in self._dbs:
527                                 if hasattr(db, "xmatch"):
528                                         cpv = db.xmatch("bestmatch-visible", atom)
529                                         if not cpv or portage.cpv_getkey(cpv) != cp:
530                                                 continue
531                                         if not result or cpv == portage.best([cpv, result]):
532                                                 result = cpv
533                                 else:
534                                         db_keys = Package.metadata_keys
535                                         # break out of this loop with highest visible
536                                         # match, checked in descending order
537                                         for cpv in reversed(db.match(atom)):
538                                                 if portage.cpv_getkey(cpv) != cp:
539                                                         continue
540                                                 metadata = izip(db_keys,
541                                                         db.aux_get(cpv, db_keys))
542                                                 if not self._visible(db, cpv, metadata):
543                                                         continue
544                                                 if not result or cpv == portage.best([cpv, result]):
545                                                         result = cpv
546                                                 break
547                 else:
548                         raise NotImplementedError(level)
549                 return result
550
551         def execute(self,searchkey):
552                 """Performs the search for the supplied search key"""
553                 match_category = 0
554                 self.searchkey=searchkey
555                 self.packagematches = []
556                 if self.searchdesc:
557                         self.searchdesc=1
558                         self.matches = {"pkg":[], "desc":[], "set":[]}
559                 else:
560                         self.searchdesc=0
561                         self.matches = {"pkg":[], "set":[]}
562                 print "Searching...   ",
563
564                 regexsearch = False
565                 if self.searchkey.startswith('%'):
566                         regexsearch = True
567                         self.searchkey = self.searchkey[1:]
568                 if self.searchkey.startswith('@'):
569                         match_category = 1
570                         self.searchkey = self.searchkey[1:]
571                 if regexsearch:
572                         self.searchre=re.compile(self.searchkey,re.I)
573                 else:
574                         self.searchre=re.compile(re.escape(self.searchkey), re.I)
575                 for package in self.portdb.cp_all():
576                         self.spinner.update()
577
578                         if match_category:
579                                 match_string  = package[:]
580                         else:
581                                 match_string  = package.split("/")[-1]
582
583                         masked=0
584                         if self.searchre.search(match_string):
585                                 if not self.portdb.xmatch("match-visible", package):
586                                         masked=1
587                                 self.matches["pkg"].append([package,masked])
588                         elif self.searchdesc: # DESCRIPTION searching
589                                 full_package = self.portdb.xmatch("bestmatch-visible", package)
590                                 if not full_package:
591                                         #no match found; we don't want to query description
592                                         full_package = portage.best(
593                                                 self.portdb.xmatch("match-all", package))
594                                         if not full_package:
595                                                 continue
596                                         else:
597                                                 masked=1
598                                 try:
599                                         full_desc = self.portdb.aux_get(
600                                                 full_package, ["DESCRIPTION"])[0]
601                                 except KeyError:
602                                         print "emerge: search: aux_get() failed, skipping"
603                                         continue
604                                 if self.searchre.search(full_desc):
605                                         self.matches["desc"].append([full_package,masked])
606
607                 self.sdict = self.setconfig.getSets()
608                 for setname in self.sdict:
609                         self.spinner.update()
610                         if match_category:
611                                 match_string = setname
612                         else:
613                                 match_string = setname.split("/")[-1]
614                         
615                         if self.searchre.search(match_string):
616                                 self.matches["set"].append([setname, False])
617                         elif self.searchdesc:
618                                 if self.searchre.search(
619                                         self.sdict[setname].getMetadata("DESCRIPTION")):
620                                         self.matches["set"].append([setname, False])
621                         
622                 self.mlen=0
623                 for mtype in self.matches:
624                         self.matches[mtype].sort()
625                         self.mlen += len(self.matches[mtype])
626
627         def addCP(self, cp):
628                 if not self.portdb.xmatch("match-all", cp):
629                         return
630                 masked = 0
631                 if not self.portdb.xmatch("bestmatch-visible", cp):
632                         masked = 1
633                 self.matches["pkg"].append([cp, masked])
634                 self.mlen += 1
635
636         def output(self):
637                 """Outputs the results of the search."""
638                 print "\b\b  \n[ Results for search key : "+white(self.searchkey)+" ]"
639                 print "[ Applications found : "+white(str(self.mlen))+" ]"
640                 print " "
641                 vardb = self.vartree.dbapi
642                 for mtype in self.matches:
643                         for match,masked in self.matches[mtype]:
644                                 full_package = None
645                                 if mtype == "pkg":
646                                         catpack = match
647                                         full_package = self.portdb.xmatch(
648                                                 "bestmatch-visible", match)
649                                         if not full_package:
650                                                 #no match found; we don't want to query description
651                                                 masked=1
652                                                 full_package = portage.best(
653                                                         self.portdb.xmatch("match-all",match))
654                                 elif mtype == "desc":
655                                         full_package = match
656                                         match        = portage.cpv_getkey(match)
657                                 elif mtype == "set":
658                                         print green("*")+"  "+white(match)
659                                         print "     ", darkgreen("Description:")+"  ", self.sdict[match].getMetadata("DESCRIPTION")
660                                         print
661                                 if full_package:
662                                         try:
663                                                 desc, homepage, license = self.portdb.aux_get(
664                                                         full_package, ["DESCRIPTION","HOMEPAGE","LICENSE"])
665                                         except KeyError:
666                                                 print "emerge: search: aux_get() failed, skipping"
667                                                 continue
668                                         if masked:
669                                                 print green("*")+"  "+white(match)+" "+red("[ Masked ]")
670                                         else:
671                                                 print green("*")+"  "+white(match)
672                                         myversion = self.getVersion(full_package, search.VERSION_RELEASE)
673
674                                         mysum = [0,0]
675                                         file_size_str = None
676                                         mycat = match.split("/")[0]
677                                         mypkg = match.split("/")[1]
678                                         mycpv = match + "-" + myversion
679                                         myebuild = self.portdb.findname(mycpv)
680                                         if myebuild:
681                                                 pkgdir = os.path.dirname(myebuild)
682                                                 from portage import manifest
683                                                 mf = manifest.Manifest(
684                                                         pkgdir, self.settings["DISTDIR"])
685                                                 try:
686                                                         uri_map = self.portdb.getFetchMap(mycpv)
687                                                 except portage.exception.InvalidDependString, e:
688                                                         file_size_str = "Unknown (%s)" % (e,)
689                                                         del e
690                                                 else:
691                                                         try:
692                                                                 mysum[0] = mf.getDistfilesSize(uri_map)
693                                                         except KeyError, e:
694                                                                 file_size_str = "Unknown (missing " + \
695                                                                         "digest for %s)" % (e,)
696                                                                 del e
697
698                                         available = False
699                                         for db in self._dbs:
700                                                 if db is not vardb and \
701                                                         db.cpv_exists(mycpv):
702                                                         available = True
703                                                         if not myebuild and hasattr(db, "bintree"):
704                                                                 myebuild = db.bintree.getname(mycpv)
705                                                                 try:
706                                                                         mysum[0] = os.stat(myebuild).st_size
707                                                                 except OSError:
708                                                                         myebuild = None
709                                                         break
710
711                                         if myebuild and file_size_str is None:
712                                                 mystr = str(mysum[0] / 1024)
713                                                 mycount = len(mystr)
714                                                 while (mycount > 3):
715                                                         mycount -= 3
716                                                         mystr = mystr[:mycount] + "," + mystr[mycount:]
717                                                 file_size_str = mystr + " kB"
718
719                                         if self.verbose:
720                                                 if available:
721                                                         print "     ", darkgreen("Latest version available:"),myversion
722                                                 print "     ", self.getInstallationStatus(mycat+'/'+mypkg)
723                                                 if myebuild:
724                                                         print "      %s %s" % \
725                                                                 (darkgreen("Size of files:"), file_size_str)
726                                                 print "     ", darkgreen("Homepage:")+"     ",homepage
727                                                 print "     ", darkgreen("Description:")+"  ",desc
728                                                 print "     ", darkgreen("License:")+"      ",license
729                                                 print
730         #
731         # private interface
732         #
733         def getInstallationStatus(self,package):
734                 installed_package = self.vartree.dep_bestmatch(package)
735                 result = ""
736                 version = self.getVersion(installed_package,search.VERSION_RELEASE)
737                 if len(version) > 0:
738                         result = darkgreen("Latest version installed:")+" "+version
739                 else:
740                         result = darkgreen("Latest version installed:")+" [ Not Installed ]"
741                 return result
742
743         def getVersion(self,full_package,detail):
744                 if len(full_package) > 1:
745                         package_parts = portage.catpkgsplit(full_package)
746                         if detail == search.VERSION_RELEASE and package_parts[3] != 'r0':
747                                 result = package_parts[2]+ "-" + package_parts[3]
748                         else:
749                                 result = package_parts[2]
750                 else:
751                         result = ""
752                 return result
753
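# Illustrative usage of the search class above (a sketch; assumes a populated
# root_config and spinner):
#
#     s = search(root_config, spinner, searchdesc=True,
#             verbose=False, usepkg=False, usepkgonly=False)
#     s.execute("python")   # prefix "%" for a regex key, "@" to match the category too
#     s.output()
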
754 class RootConfig(object):
755         """This is used internally by depgraph to track information about a
756         particular $ROOT."""
757
758         pkg_tree_map = {
759                 "ebuild"    : "porttree",
760                 "binary"    : "bintree",
761                 "installed" : "vartree"
762         }
763
764         tree_pkg_map = {}
765         for k, v in pkg_tree_map.iteritems():
766                 tree_pkg_map[v] = k
767
768         def __init__(self, settings, trees, setconfig):
769                 self.trees = trees
770                 self.settings = settings
771                 self.iuse_implicit = tuple(sorted(settings._get_implicit_iuse()))
772                 self.root = self.settings["ROOT"]
773                 self.setconfig = setconfig
774                 if setconfig is None:
775                         self.sets = {}
776                 else:
777                         self.sets = self.setconfig.getSets()
778                 self.visible_pkgs = PackageVirtualDbapi(self.settings)
779
780 def create_world_atom(pkg, args_set, root_config):
781         """Create a new atom for the world file if one does not exist.  If the
782         argument atom is precise enough to identify a specific slot then a slot
783         atom will be returned. Atoms that are in the system set may also be stored
784         in world since system atoms can only match one slot while world atoms can
785         be greedy with respect to slots.  Unslotted system packages will not be
786         stored in world."""
787
788         arg_atom = args_set.findAtomForPackage(pkg)
789         if not arg_atom:
790                 return None
791         cp = portage.dep_getkey(arg_atom)
792         new_world_atom = cp
793         sets = root_config.sets
794         portdb = root_config.trees["porttree"].dbapi
795         vardb = root_config.trees["vartree"].dbapi
796         available_slots = set(portdb.aux_get(cpv, ["SLOT"])[0] \
797                 for cpv in portdb.match(cp))
798         slotted = len(available_slots) > 1 or \
799                 (len(available_slots) == 1 and "0" not in available_slots)
800         if not slotted:
801                 # check the vdb in case this is multislot
802                 available_slots = set(vardb.aux_get(cpv, ["SLOT"])[0] \
803                         for cpv in vardb.match(cp))
804                 slotted = len(available_slots) > 1 or \
805                         (len(available_slots) == 1 and "0" not in available_slots)
806         if slotted and arg_atom != cp:
807                 # If the user gave a specific atom, store it as a
808                 # slot atom in the world file.
809                 slot_atom = pkg.slot_atom
810
811                 # For USE=multislot, there are a couple of cases to
812                 # handle here:
813                 #
814                 # 1) SLOT="0", but the real SLOT spontaneously changed to some
815                 #    unknown value, so just record an unslotted atom.
816                 #
817                 # 2) SLOT comes from an installed package and there is no
818                 #    matching SLOT in the portage tree.
819                 #
820                 # Make sure that the slot atom is available in either the
821                 # portdb or the vardb, since otherwise the user certainly
822                 # doesn't want the SLOT atom recorded in the world file
823                 # (case 1 above).  If it's only available in the vardb,
824                 # the user may be trying to prevent a USE=multislot
825                 # package from being removed by --depclean (case 2 above).
826
827                 mydb = portdb
828                 if not portdb.match(slot_atom):
829                         # SLOT seems to come from an installed multislot package
830                         mydb = vardb
831                 # If there is no installed package matching the SLOT atom,
832                 # it probably changed SLOT spontaneously due to USE=multislot,
833                 # so just record an unslotted atom.
834                 if vardb.match(slot_atom):
835                         # Now verify that the argument is precise
836                         # enough to identify a specific slot.
837                         matches = mydb.match(arg_atom)
838                         matched_slots = set()
839                         for cpv in matches:
840                                 matched_slots.add(mydb.aux_get(cpv, ["SLOT"])[0])
841                         if len(matched_slots) == 1:
842                                 new_world_atom = slot_atom
843
844         if new_world_atom == sets["world"].findAtomForPackage(pkg):
845                 # Both atoms would be identical, so there's nothing to add.
846                 return None
847         if not slotted:
848                 # Unlike world atoms, system atoms are not greedy for slots, so they
849                 # can't be safely excluded from world if they are slotted.
850                 system_atom = sets["system"].findAtomForPackage(pkg)
851                 if system_atom:
852                         if not portage.dep_getkey(system_atom).startswith("virtual/"):
853                                 return None
854                         # System virtuals aren't safe to exclude from world since they can
855                         # match multiple old-style virtuals but only one of them will be
856                         # pulled in by update or depclean.
857                         providers = portdb.mysettings.getvirtuals().get(
858                                 portage.dep_getkey(system_atom))
859                         if providers and len(providers) == 1 and providers[0] == cp:
860                                 return None
861         return new_world_atom
862
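# Illustrative behaviour of create_world_atom() above, following its
# docstring (the package names are examples only): when the argument atom
# pins a single slot of a slotted package, e.g. ">=sys-devel/gcc-4.3" where
# only slot "4.3" matches, the recorded world entry is the slot atom
# "sys-devel/gcc:4.3" rather than the bare "sys-devel/gcc"; for an unslotted
# package that is already covered by a non-virtual system atom, None is
# returned so nothing is added to world.
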
863 def filter_iuse_defaults(iuse):
864         for flag in iuse:
865                 if flag.startswith("+") or flag.startswith("-"):
866                         yield flag[1:]
867                 else:
868                         yield flag
869
870 class SlotObject(object):
871         __slots__ = ("__weakref__",)
872
873         def __init__(self, **kwargs):
874                 classes = [self.__class__]
875                 while classes:
876                         c = classes.pop()
877                         if c is SlotObject:
878                                 continue
879                         classes.extend(c.__bases__)
880                         slots = getattr(c, "__slots__", None)
881                         if not slots:
882                                 continue
883                         for myattr in slots:
884                                 myvalue = kwargs.get(myattr, None)
885                                 setattr(self, myattr, myvalue)
886
887         def copy(self):
888                 """
889                 Create a new instance and copy all attributes
890                 defined from __slots__ (including those from
891                 inherited classes).
892                 """
893                 obj = self.__class__()
894
895                 classes = [self.__class__]
896                 while classes:
897                         c = classes.pop()
898                         if c is SlotObject:
899                                 continue
900                         classes.extend(c.__bases__)
901                         slots = getattr(c, "__slots__", None)
902                         if not slots:
903                                 continue
904                         for myattr in slots:
905                                 setattr(obj, myattr, getattr(self, myattr))
906
907                 return obj
908
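# Minimal illustrative sketch of the __slots__/keyword-argument convention
# provided by SlotObject above and used by the classes below (the _Example
# class is hypothetical):
#
#     class _Example(SlotObject):
#             __slots__ = ("name", "value")
#
#     obj = _Example(name="foo")      # unset slots default to None
#     clone = obj.copy()              # copies every slot, including inherited ones
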
909 class AbstractDepPriority(SlotObject):
910         __slots__ = ("buildtime", "runtime", "runtime_post")
911
912         def __lt__(self, other):
913                 return self.__int__() < other
914
915         def __le__(self, other):
916                 return self.__int__() <= other
917
918         def __eq__(self, other):
919                 return self.__int__() == other
920
921         def __ne__(self, other):
922                 return self.__int__() != other
923
924         def __gt__(self, other):
925                 return self.__int__() > other
926
927         def __ge__(self, other):
928                 return self.__int__() >= other
929
930         def copy(self):
931                 import copy
932                 return copy.copy(self)
933
934 class DepPriority(AbstractDepPriority):
935
936         __slots__ = ("satisfied", "optional", "rebuild")
937
938         def __int__(self):
939                 return 0
940
941         def __str__(self):
942                 if self.optional:
943                         return "optional"
944                 if self.buildtime:
945                         return "buildtime"
946                 if self.runtime:
947                         return "runtime"
948                 if self.runtime_post:
949                         return "runtime_post"
950                 return "soft"
951
952 class BlockerDepPriority(DepPriority):
953         __slots__ = ()
954         def __int__(self):
955                 return 0
956
957         def __str__(self):
958                 return 'blocker'
959
960 BlockerDepPriority.instance = BlockerDepPriority()
961
962 class UnmergeDepPriority(AbstractDepPriority):
963         __slots__ = ("optional", "satisfied",)
964         """
965         Combination of properties           Priority  Category
966
967         runtime                                0       HARD
968         runtime_post                          -1       HARD
969         buildtime                             -2       SOFT
970         (none of the above)                   -2       SOFT
971         """
972
973         MAX    =  0
974         SOFT   = -2
975         MIN    = -2
976
977         def __int__(self):
978                 if self.runtime:
979                         return 0
980                 if self.runtime_post:
981                         return -1
982                 if self.buildtime:
983                         return -2
984                 return -2
985
986         def __str__(self):
987                 myvalue = self.__int__()
988                 if myvalue > self.SOFT:
989                         return "hard"
990                 return "soft"
991
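# Illustrative mapping for UnmergeDepPriority above, matching its table:
#
#     UnmergeDepPriority(runtime=True)        -> int:  0, str: "hard"
#     UnmergeDepPriority(runtime_post=True)   -> int: -1, str: "hard"
#     UnmergeDepPriority(buildtime=True)      -> int: -2, str: "soft"
#     UnmergeDepPriority()                    -> int: -2, str: "soft"
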
992 class DepPriorityNormalRange(object):
993         """
994         DepPriority properties              Index      Category
995
996         buildtime                                      HARD
997         runtime                                3       MEDIUM
998         runtime_post                           2       MEDIUM_SOFT
999         optional                               1       SOFT
1000         (none of the above)                    0       NONE
1001         """
1002         MEDIUM      = 3
1003         MEDIUM_SOFT = 2
1004         SOFT        = 1
1005         NONE        = 0
1006
1007         @classmethod
1008         def _ignore_optional(cls, priority):
1009                 if priority.__class__ is not DepPriority:
1010                         return False
1011                 return bool(priority.optional)
1012
1013         @classmethod
1014         def _ignore_runtime_post(cls, priority):
1015                 if priority.__class__ is not DepPriority:
1016                         return False
1017                 return bool(priority.optional or priority.runtime_post)
1018
1019         @classmethod
1020         def _ignore_runtime(cls, priority):
1021                 if priority.__class__ is not DepPriority:
1022                         return False
1023                 return not priority.buildtime
1024
1025         ignore_medium      = _ignore_runtime
1026         ignore_medium_soft = _ignore_runtime_post
1027         ignore_soft        = _ignore_optional
1028
1029 DepPriorityNormalRange.ignore_priority = (
1030         None,
1031         DepPriorityNormalRange._ignore_optional,
1032         DepPriorityNormalRange._ignore_runtime_post,
1033         DepPriorityNormalRange._ignore_runtime
1034 )
1035
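# Illustrative use of the ignore_priority table above: indexing it with one
# of the range constants yields progressively more aggressive predicates for
# deciding which digraph edges to ignore, e.g.
#
#     f = DepPriorityNormalRange.ignore_priority[DepPriorityNormalRange.SOFT]
#     f(DepPriority(optional=True))    # True  - optional edges may be ignored
#     f(DepPriority(buildtime=True))   # False - build-time edges are kept
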
1036 class DepPrioritySatisfiedRange(object):
1037         """
1038         DepPriority                         Index      Category
1039
1040         not satisfied and buildtime                    HARD
1041         not satisfied and runtime              7       MEDIUM
1042         not satisfied and runtime_post         6       MEDIUM_SOFT
1043         satisfied and buildtime and rebuild    5       SOFT
1044         satisfied and buildtime                4       SOFT
1045         satisfied and runtime                  3       SOFT
1046         satisfied and runtime_post             2       SOFT
1047         optional                               1       SOFT
1048         (none of the above)                    0       NONE
1049         """
1050         MEDIUM      = 7
1051         MEDIUM_SOFT = 6
1052         SOFT        = 5
1053         NONE        = 0
1054
1055         @classmethod
1056         def _ignore_optional(cls, priority):
1057                 if priority.__class__ is not DepPriority:
1058                         return False
1059                 return bool(priority.optional)
1060
1061         @classmethod
1062         def _ignore_satisfied_runtime_post(cls, priority):
1063                 if priority.__class__ is not DepPriority:
1064                         return False
1065                 if priority.optional:
1066                         return True
1067                 if not priority.satisfied:
1068                         return False
1069                 return bool(priority.runtime_post)
1070
1071         @classmethod
1072         def _ignore_satisfied_runtime(cls, priority):
1073                 if priority.__class__ is not DepPriority:
1074                         return False
1075                 if priority.optional:
1076                         return True
1077                 if not priority.satisfied:
1078                         return False
1079                 return not priority.buildtime
1080
1081         @classmethod
1082         def _ignore_satisfied_buildtime(cls, priority):
1083                 if priority.__class__ is not DepPriority:
1084                         return False
1085                 if priority.optional:
1086                         return True
1087                 if not priority.satisfied:
1088                         return False
1089                 if priority.buildtime:
1090                         return not priority.rebuild
1091                 return True
1092
1093         @classmethod
1094         def _ignore_satisfied_buildtime_rebuild(cls, priority):
1095                 if priority.__class__ is not DepPriority:
1096                         return False
1097                 if priority.optional:
1098                         return True
1099                 return bool(priority.satisfied)
1100
1101         @classmethod
1102         def _ignore_runtime_post(cls, priority):
1103                 if priority.__class__ is not DepPriority:
1104                         return False
1105                 return bool(priority.optional or \
1106                         priority.satisfied or \
1107                         priority.runtime_post)
1108
1109         @classmethod
1110         def _ignore_runtime(cls, priority):
1111                 if priority.__class__ is not DepPriority:
1112                         return False
1113                 return bool(priority.satisfied or \
1114                         not priority.buildtime)
1115
1116         ignore_medium      = _ignore_runtime
1117         ignore_medium_soft = _ignore_runtime_post
1118         ignore_soft        = _ignore_satisfied_buildtime_rebuild
1119
1120 DepPrioritySatisfiedRange.ignore_priority = (
1121         None,
1122         DepPrioritySatisfiedRange._ignore_optional,
1123         DepPrioritySatisfiedRange._ignore_satisfied_runtime_post,
1124         DepPrioritySatisfiedRange._ignore_satisfied_runtime,
1125         DepPrioritySatisfiedRange._ignore_satisfied_buildtime,
1126         DepPrioritySatisfiedRange._ignore_satisfied_buildtime_rebuild,
1127         DepPrioritySatisfiedRange._ignore_runtime_post,
1128         DepPrioritySatisfiedRange._ignore_runtime
1129 )
1130
1131 def _find_deep_system_runtime_deps(graph):
1132         deep_system_deps = set()
1133         node_stack = []
1134         for node in graph:
1135                 if not isinstance(node, Package) or \
1136                         node.operation == 'uninstall':
1137                         continue
1138                 if node.root_config.sets['system'].findAtomForPackage(node):
1139                         node_stack.append(node)
1140
1141         def ignore_priority(priority):
1142                 """
1143                 Ignore non-runtime priorities.
1144                 """
1145                 if isinstance(priority, DepPriority) and \
1146                         (priority.runtime or priority.runtime_post):
1147                         return False
1148                 return True
1149
1150         while node_stack:
1151                 node = node_stack.pop()
1152                 if node in deep_system_deps:
1153                         continue
1154                 deep_system_deps.add(node)
1155                 for child in graph.child_nodes(node, ignore_priority=ignore_priority):
1156                         if not isinstance(child, Package) or \
1157                                 child.operation == 'uninstall':
1158                                 continue
1159                         node_stack.append(child)
1160
1161         return deep_system_deps
1162
1163 class FakeVartree(portage.vartree):
1164         """This implements an in-memory copy of a vartree instance that provides
1165         all the interfaces required for use by the depgraph.  The vardb is locked
1166         during the constructor call just long enough to read a copy of the
1167         installed package information.  This allows the depgraph to do its
1168         dependency calculations without holding a lock on the vardb.  It also
1169         allows things like vardb global updates to be done in memory so that the
1170         user doesn't necessarily need write access to the vardb in cases where
1171         global updates are necessary (updates are performed when necessary if there
1172         is not a matching ebuild in the tree)."""
1173         def __init__(self, root_config, pkg_cache=None, acquire_lock=1):
1174                 self._root_config = root_config
1175                 if pkg_cache is None:
1176                         pkg_cache = {}
1177                 real_vartree = root_config.trees["vartree"]
1178                 portdb = root_config.trees["porttree"].dbapi
1179                 self.root = real_vartree.root
1180                 self.settings = real_vartree.settings
1181                 mykeys = list(real_vartree.dbapi._aux_cache_keys)
1182                 if "_mtime_" not in mykeys:
1183                         mykeys.append("_mtime_")
1184                 self._db_keys = mykeys
1185                 self._pkg_cache = pkg_cache
1186                 self.dbapi = PackageVirtualDbapi(real_vartree.settings)
1187                 vdb_path = os.path.join(self.root, portage.VDB_PATH)
1188                 try:
1189                         # At least the parent needs to exist for the lock file.
1190                         portage.util.ensure_dirs(vdb_path)
1191                 except portage.exception.PortageException:
1192                         pass
1193                 vdb_lock = None
1194                 try:
1195                         if acquire_lock and os.access(vdb_path, os.W_OK):
1196                                 vdb_lock = portage.locks.lockdir(vdb_path)
1197                         real_dbapi = real_vartree.dbapi
1198                         slot_counters = {}
1199                         for cpv in real_dbapi.cpv_all():
1200                                 cache_key = ("installed", self.root, cpv, "nomerge")
1201                                 pkg = self._pkg_cache.get(cache_key)
1202                                 if pkg is not None:
1203                                         metadata = pkg.metadata
1204                                 else:
1205                                         metadata = dict(izip(mykeys, real_dbapi.aux_get(cpv, mykeys)))
1206                                 myslot = metadata["SLOT"]
1207                                 mycp = portage.dep_getkey(cpv)
1208                                 myslot_atom = "%s:%s" % (mycp, myslot)
1209                                 try:
1210                                         mycounter = long(metadata["COUNTER"])
1211                                 except ValueError:
1212                                         mycounter = 0
1213                                         metadata["COUNTER"] = str(mycounter)
1214                                 other_counter = slot_counters.get(myslot_atom, None)
1215                                 if other_counter is not None:
1216                                         if other_counter > mycounter:
1217                                                 continue
1218                                 slot_counters[myslot_atom] = mycounter
1219                                 if pkg is None:
1220                                         pkg = Package(built=True, cpv=cpv,
1221                                                 installed=True, metadata=metadata,
1222                                                 root_config=root_config, type_name="installed")
1223                                 self._pkg_cache[pkg] = pkg
1224                                 self.dbapi.cpv_inject(pkg)
1225                         real_dbapi.flush_cache()
1226                 finally:
1227                         if vdb_lock:
1228                                 portage.locks.unlockdir(vdb_lock)
1229                 # Populate the old-style virtuals using the cached values.
1230                 if not self.settings.treeVirtuals:
1231                         self.settings.treeVirtuals = portage.util.map_dictlist_vals(
1232                                 portage.getCPFromCPV, self.get_all_provides())
1233
1234                 # Initialize variables needed for lazy cache pulls of the live ebuild
1235                 # metadata.  This ensures that the vardb lock is released ASAP, without
1236                 # being delayed in case cache generation is triggered.
1237                 self._aux_get = self.dbapi.aux_get
1238                 self.dbapi.aux_get = self._aux_get_wrapper
1239                 self._match = self.dbapi.match
1240                 self.dbapi.match = self._match_wrapper
1241                 self._aux_get_history = set()
1242                 self._portdb_keys = ["EAPI", "DEPEND", "RDEPEND", "PDEPEND"]
1243                 self._portdb = portdb
1244                 self._global_updates = None
1245
1246         def _match_wrapper(self, cpv, use_cache=1):
1247                 """
1248                 Make sure the metadata in Package instances gets updated for any
1249                 cpv that is returned from a match() call, since the metadata can
1250                 be accessed directly from the Package instance instead of via
1251                 aux_get().
1252                 """
1253                 matches = self._match(cpv, use_cache=use_cache)
1254                 for cpv in matches:
1255                         if cpv in self._aux_get_history:
1256                                 continue
1257                         self._aux_get_wrapper(cpv, [])
1258                 return matches
1259
1260         def _aux_get_wrapper(self, pkg, wants):
1261                 if pkg in self._aux_get_history:
1262                         return self._aux_get(pkg, wants)
1263                 self._aux_get_history.add(pkg)
1264                 try:
1265                         # Use the live ebuild metadata if possible.
1266                         live_metadata = dict(izip(self._portdb_keys,
1267                                 self._portdb.aux_get(pkg, self._portdb_keys)))
1268                         if not portage.eapi_is_supported(live_metadata["EAPI"]):
1269                                 raise KeyError(pkg)
1270                         self.dbapi.aux_update(pkg, live_metadata)
1271                 except (KeyError, portage.exception.PortageException):
1272                         if self._global_updates is None:
1273                                 self._global_updates = \
1274                                         grab_global_updates(self._portdb.porttree_root)
1275                         perform_global_updates(
1276                                 pkg, self.dbapi, self._global_updates)
1277                 return self._aux_get(pkg, wants)
1278
1279         def sync(self, acquire_lock=1):
1280                 """
1281                 Call this method to synchronize state with the real vardb
1282                 after one or more packages may have been installed or
1283                 uninstalled.
1284                 """
1285                 vdb_path = os.path.join(self.root, portage.VDB_PATH)
1286                 try:
1287                         # At least the parent needs to exist for the lock file.
1288                         portage.util.ensure_dirs(vdb_path)
1289                 except portage.exception.PortageException:
1290                         pass
1291                 vdb_lock = None
1292                 try:
1293                         if acquire_lock and os.access(vdb_path, os.W_OK):
1294                                 vdb_lock = portage.locks.lockdir(vdb_path)
1295                         self._sync()
1296                 finally:
1297                         if vdb_lock:
1298                                 portage.locks.unlockdir(vdb_lock)
1299
1300         def _sync(self):
1301
1302                 real_vardb = self._root_config.trees["vartree"].dbapi
1303                 current_cpv_set = frozenset(real_vardb.cpv_all())
1304                 pkg_vardb = self.dbapi
1305                 aux_get_history = self._aux_get_history
1306
1307                 # Remove any packages that have been uninstalled.
1308                 for pkg in list(pkg_vardb):
1309                         if pkg.cpv not in current_cpv_set:
1310                                 pkg_vardb.cpv_remove(pkg)
1311                                 aux_get_history.discard(pkg.cpv)
1312
1313                 # Validate counters and timestamps.
1314                 slot_counters = {}
1315                 root = self.root
1316                 validation_keys = ["COUNTER", "_mtime_"]
1317                 for cpv in current_cpv_set:
1318
1319                         pkg_hash_key = ("installed", root, cpv, "nomerge")
1320                         pkg = pkg_vardb.get(pkg_hash_key)
1321                         if pkg is not None:
1322                                 counter, mtime = real_vardb.aux_get(cpv, validation_keys)
1323                                 try:
1324                                         counter = long(counter)
1325                                 except ValueError:
1326                                         counter = 0
1327
1328                                 if counter != pkg.counter or \
1329                                         mtime != pkg.mtime:
1330                                         pkg_vardb.cpv_remove(pkg)
1331                                         aux_get_history.discard(pkg.cpv)
1332                                         pkg = None
1333
1334                         if pkg is None:
1335                                 pkg = self._pkg(cpv)
1336
1337                         other_counter = slot_counters.get(pkg.slot_atom)
1338                         if other_counter is not None:
1339                                 if other_counter > pkg.counter:
1340                                         continue
1341
1342                         slot_counters[pkg.slot_atom] = pkg.counter
1343                         pkg_vardb.cpv_inject(pkg)
1344
1345                 real_vardb.flush_cache()
1346
1347         def _pkg(self, cpv):
1348                 root_config = self._root_config
1349                 real_vardb = root_config.trees["vartree"].dbapi
1350                 pkg = Package(cpv=cpv, installed=True,
1351                         metadata=izip(self._db_keys,
1352                         real_vardb.aux_get(cpv, self._db_keys)),
1353                         root_config=root_config,
1354                         type_name="installed")
1355
1356                 try:
1357                         mycounter = long(pkg.metadata["COUNTER"])
1358                 except ValueError:
1359                         mycounter = 0
1360                         pkg.metadata["COUNTER"] = str(mycounter)
1361
1362                 return pkg
1363
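# Illustrative sketch of typical FakeVartree usage: the root_config value is
# assumed to come from the usual trees/root configuration set up elsewhere in
# this module, and the helper name below is hypothetical.
def _sketch_fake_vartree_usage(root_config, atom):
        """Hypothetical example: build an in-memory copy of the vartree and
        match an atom against it without holding the vardb lock."""
        fake_vartree = FakeVartree(root_config)
        # match() is wrapped so that matched Package instances get their
        # metadata refreshed from live ebuilds when possible.
        return fake_vartree.dbapi.match(atom)
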
1364 def grab_global_updates(portdir):
1365         from portage.update import grab_updates, parse_updates
1366         updpath = os.path.join(portdir, "profiles", "updates")
1367         try:
1368                 rawupdates = grab_updates(updpath)
1369         except portage.exception.DirectoryNotFound:
1370                 rawupdates = []
1371         upd_commands = []
1372         for mykey, mystat, mycontent in rawupdates:
1373                 commands, errors = parse_updates(mycontent)
1374                 upd_commands.extend(commands)
1375         return upd_commands
1376
1377 def perform_global_updates(mycpv, mydb, mycommands):
1378         from portage.update import update_dbentries
1379         aux_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
1380         aux_dict = dict(izip(aux_keys, mydb.aux_get(mycpv, aux_keys)))
1381         updates = update_dbentries(mycommands, aux_dict)
1382         if updates:
1383                 mydb.aux_update(mycpv, updates)
1384
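# Illustrative sketch tying the two helpers above together: read the profile
# update commands once, then apply them to the dependency strings of a single
# installed package.  The portdir and fake vardb arguments are assumed to be
# supplied by the surrounding depgraph code; the helper name is hypothetical.
def _sketch_apply_global_updates(portdir, fake_vardb, cpv):
        """Hypothetical example of using grab_global_updates() together with
        perform_global_updates()."""
        upd_commands = grab_global_updates(portdir)
        if upd_commands:
                perform_global_updates(cpv, fake_vardb, upd_commands)
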
1385 def visible(pkgsettings, pkg):
1386         """
1387         Check if a package is visible. This can raise an InvalidDependString
1388         exception if LICENSE is invalid.
1389         TODO: optionally generate a list of masking reasons
1390         @rtype: Boolean
1391         @returns: True if the package is visible, False otherwise.
1392         """
1393         if not pkg.metadata["SLOT"]:
1394                 return False
1395         if not pkg.installed:
1396                 if not pkgsettings._accept_chost(pkg.cpv, pkg.metadata):
1397                         return False
1398         eapi = pkg.metadata["EAPI"]
1399         if not portage.eapi_is_supported(eapi):
1400                 return False
1401         if not pkg.installed:
1402                 if portage._eapi_is_deprecated(eapi):
1403                         return False
1404                 if pkgsettings._getMissingKeywords(pkg.cpv, pkg.metadata):
1405                         return False
1406         if pkgsettings._getMaskAtom(pkg.cpv, pkg.metadata):
1407                 return False
1408         if pkgsettings._getProfileMaskAtom(pkg.cpv, pkg.metadata):
1409                 return False
1410         try:
1411                 if pkgsettings._getMissingLicenses(pkg.cpv, pkg.metadata):
1412                         return False
1413         except portage.exception.InvalidDependString:
1414                 return False
1415         return True
1416
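# Illustrative sketch: visible() is typically used to filter candidate
# packages before selection.  The pkgsettings and package list arguments are
# assumed to be supplied by the caller; the helper name is hypothetical.
def _sketch_filter_visible(pkgsettings, pkgs):
        """Hypothetical example: keep only the packages that visible()
        accepts for the given settings."""
        return [pkg for pkg in pkgs if visible(pkgsettings, pkg)]
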
1417 def get_masking_status(pkg, pkgsettings, root_config):
1418
1419         mreasons = portage.getmaskingstatus(
1420                 pkg, settings=pkgsettings,
1421                 portdb=root_config.trees["porttree"].dbapi)
1422
1423         if not pkg.installed:
1424                 if not pkgsettings._accept_chost(pkg.cpv, pkg.metadata):
1425                         mreasons.append("CHOST: %s" % \
1426                                 pkg.metadata["CHOST"])
1427
1428         if not pkg.metadata["SLOT"]:
1429                 mreasons.append("invalid: SLOT is undefined")
1430
1431         return mreasons
1432
1433 def get_mask_info(root_config, cpv, pkgsettings,
1434         db, pkg_type, built, installed, db_keys):
1435         eapi_masked = False
1436         try:
1437                 metadata = dict(izip(db_keys,
1438                         db.aux_get(cpv, db_keys)))
1439         except KeyError:
1440                 metadata = None
1441         if metadata and not built:
1442                 pkgsettings.setcpv(cpv, mydb=metadata)
1443                 metadata["USE"] = pkgsettings["PORTAGE_USE"]
1444                 metadata['CHOST'] = pkgsettings.get('CHOST', '')
1445         if metadata is None:
1446                 mreasons = ["corruption"]
1447         else:
1448                 eapi = metadata['EAPI']
1449                 if eapi[:1] == '-':
1450                         eapi = eapi[1:]
1451                 if not portage.eapi_is_supported(eapi):
1452                         mreasons = ['EAPI %s' % eapi]
1453                 else:
1454                         pkg = Package(type_name=pkg_type, root_config=root_config,
1455                                 cpv=cpv, built=built, installed=installed, metadata=metadata)
1456                         mreasons = get_masking_status(pkg, pkgsettings, root_config)
1457         return metadata, mreasons
1458
1459 def show_masked_packages(masked_packages):
1460         shown_licenses = set()
1461         shown_comments = set()
1462         # Maybe there is both an ebuild and a binary. Only
1463         # show one of them to avoid redundant output.
1464         shown_cpvs = set()
1465         have_eapi_mask = False
1466         for (root_config, pkgsettings, cpv,
1467                 metadata, mreasons) in masked_packages:
1468                 if cpv in shown_cpvs:
1469                         continue
1470                 shown_cpvs.add(cpv)
1471                 comment, filename = None, None
1472                 if "package.mask" in mreasons:
1473                         comment, filename = \
1474                                 portage.getmaskingreason(
1475                                 cpv, metadata=metadata,
1476                                 settings=pkgsettings,
1477                                 portdb=root_config.trees["porttree"].dbapi,
1478                                 return_location=True)
1479                 missing_licenses = []
1480                 if metadata:
1481                         if not portage.eapi_is_supported(metadata["EAPI"]):
1482                                 have_eapi_mask = True
1483                         try:
1484                                 missing_licenses = \
1485                                         pkgsettings._getMissingLicenses(
1486                                                 cpv, metadata)
1487                         except portage.exception.InvalidDependString:
1488                                 # This will have already been reported
1489                                 # above via mreasons.
1490                                 pass
1491
1492                 print "- "+cpv+" (masked by: "+", ".join(mreasons)+")"
1493                 if comment and comment not in shown_comments:
1494                         print filename+":"
1495                         print comment
1496                         shown_comments.add(comment)
1497                 portdb = root_config.trees["porttree"].dbapi
1498                 for l in missing_licenses:
1499                         l_path = portdb.findLicensePath(l)
1500                         if l in shown_licenses:
1501                                 continue
1502                         msg = ("A copy of the '%s' license" + \
1503                                 " is located at '%s'.") % (l, l_path)
1504                         print msg
1505                         print
1506                         shown_licenses.add(l)
1507         return have_eapi_mask
1508
1509 class Task(SlotObject):
1510         __slots__ = ("_hash_key", "_hash_value")
1511
1512         def _get_hash_key(self):
1513                 hash_key = getattr(self, "_hash_key", None)
1514                 if hash_key is None:
1515                         raise NotImplementedError(self)
1516                 return hash_key
1517
1518         def __eq__(self, other):
1519                 return self._get_hash_key() == other
1520
1521         def __ne__(self, other):
1522                 return self._get_hash_key() != other
1523
1524         def __hash__(self):
1525                 hash_value = getattr(self, "_hash_value", None)
1526                 if hash_value is None:
1527                         self._hash_value = hash(self._get_hash_key())
1528                 return self._hash_value
1529
1530         def __len__(self):
1531                 return len(self._get_hash_key())
1532
1533         def __getitem__(self, key):
1534                 return self._get_hash_key()[key]
1535
1536         def __iter__(self):
1537                 return iter(self._get_hash_key())
1538
1539         def __contains__(self, key):
1540                 return key in self._get_hash_key()
1541
1542         def __str__(self):
1543                 return str(self._get_hash_key())
1544
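# Illustrative sketch of the Task contract: a subclass supplies a
# _get_hash_key() that returns a tuple, and instances then hash, compare and
# iterate as that tuple.  The class below is hypothetical and mirrors the
# pattern used by Blocker and Package further down; for instance,
# _ExampleTask(name="foo") == ("example", "foo") evaluates to True.
class _ExampleTask(Task):

        __hash__ = Task.__hash__
        __slots__ = ("name",)

        def _get_hash_key(self):
                hash_key = getattr(self, "_hash_key", None)
                if hash_key is None:
                        self._hash_key = ("example", self.name)
                return self._hash_key
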
1545 class Blocker(Task):
1546
1547         __hash__ = Task.__hash__
1548         __slots__ = ("root", "atom", "cp", "eapi", "satisfied")
1549
1550         def __init__(self, **kwargs):
1551                 Task.__init__(self, **kwargs)
1552                 self.cp = portage.dep_getkey(self.atom)
1553
1554         def _get_hash_key(self):
1555                 hash_key = getattr(self, "_hash_key", None)
1556                 if hash_key is None:
1557                         self._hash_key = \
1558                                 ("blocks", self.root, self.atom, self.eapi)
1559                 return self._hash_key
1560
1561 class Package(Task):
1562
1563         __hash__ = Task.__hash__
1564         __slots__ = ("built", "cpv", "depth",
1565                 "installed", "metadata", "onlydeps", "operation",
1566                 "root_config", "type_name",
1567                 "category", "counter", "cp", "cpv_split",
1568                 "inherited", "iuse", "mtime",
1569                 "pf", "pv_split", "root", "slot", "slot_atom", "use")
1570
1571         metadata_keys = [
1572                 "CHOST", "COUNTER", "DEPEND", "EAPI",
1573                 "INHERITED", "IUSE", "KEYWORDS",
1574                 "LICENSE", "PDEPEND", "PROVIDE", "RDEPEND",
1575                 "repository", "PROPERTIES", "RESTRICT", "SLOT", "USE", "_mtime_"]
1576
1577         def __init__(self, **kwargs):
1578                 Task.__init__(self, **kwargs)
1579                 self.root = self.root_config.root
1580                 self.metadata = _PackageMetadataWrapper(self, self.metadata)
1581                 self.cp = portage.cpv_getkey(self.cpv)
1582                 slot = self.slot
1583                 if not slot:
1584                         # Avoid an InvalidAtom exception when creating slot_atom.
1585                         # This package instance will be masked due to empty SLOT.
1586                         slot = '0'
1587                 self.slot_atom = portage.dep.Atom("%s:%s" % (self.cp, slot))
1588                 self.category, self.pf = portage.catsplit(self.cpv)
1589                 self.cpv_split = portage.catpkgsplit(self.cpv)
1590                 self.pv_split = self.cpv_split[1:]
1591
1592         class _use(object):
1593
1594                 __slots__ = ("__weakref__", "enabled")
1595
1596                 def __init__(self, use):
1597                         self.enabled = frozenset(use)
1598
1599         class _iuse(object):
1600
1601                 __slots__ = ("__weakref__", "all", "enabled", "disabled", "iuse_implicit", "regex", "tokens")
1602
1603                 def __init__(self, tokens, iuse_implicit):
1604                         self.tokens = tuple(tokens)
1605                         self.iuse_implicit = iuse_implicit
1606                         enabled = []
1607                         disabled = []
1608                         other = []
1609                         for x in tokens:
1610                                 prefix = x[:1]
1611                                 if prefix == "+":
1612                                         enabled.append(x[1:])
1613                                 elif prefix == "-":
1614                                         disabled.append(x[1:])
1615                                 else:
1616                                         other.append(x)
1617                         self.enabled = frozenset(enabled)
1618                         self.disabled = frozenset(disabled)
1619                         self.all = frozenset(chain(enabled, disabled, other))
1620
1621                 def __getattribute__(self, name):
1622                         if name == "regex":
1623                                 try:
1624                                         return object.__getattribute__(self, "regex")
1625                                 except AttributeError:
1626                                         all = object.__getattribute__(self, "all")
1627                                         iuse_implicit = object.__getattribute__(self, "iuse_implicit")
1628                                         # Escape anything except ".*" which is supposed
1629                                         # to pass through from _get_implicit_iuse()
1630                                         regex = (re.escape(x) for x in chain(all, iuse_implicit))
1631                                         regex = "^(%s)$" % "|".join(regex)
1632                                         regex = regex.replace("\\.\\*", ".*")
1633                                         self.regex = re.compile(regex)
1634                         return object.__getattribute__(self, name)
1635
1636         def _get_hash_key(self):
1637                 hash_key = getattr(self, "_hash_key", None)
1638                 if hash_key is None:
1639                         if self.operation is None:
1640                                 self.operation = "merge"
1641                                 if self.onlydeps or self.installed:
1642                                         self.operation = "nomerge"
1643                         self._hash_key = \
1644                                 (self.type_name, self.root, self.cpv, self.operation)
1645                 return self._hash_key
1646
1647         def __lt__(self, other):
1648                 if other.cp != self.cp:
1649                         return False
1650                 if portage.pkgcmp(self.pv_split, other.pv_split) < 0:
1651                         return True
1652                 return False
1653
1654         def __le__(self, other):
1655                 if other.cp != self.cp:
1656                         return False
1657                 if portage.pkgcmp(self.pv_split, other.pv_split) <= 0:
1658                         return True
1659                 return False
1660
1661         def __gt__(self, other):
1662                 if other.cp != self.cp:
1663                         return False
1664                 if portage.pkgcmp(self.pv_split, other.pv_split) > 0:
1665                         return True
1666                 return False
1667
1668         def __ge__(self, other):
1669                 if other.cp != self.cp:
1670                         return False
1671                 if portage.pkgcmp(self.pv_split, other.pv_split) >= 0:
1672                         return True
1673                 return False
1674
1675 _all_metadata_keys = set(x for x in portage.auxdbkeys \
1676         if not x.startswith("UNUSED_"))
1677 _all_metadata_keys.discard("CDEPEND")
1678 _all_metadata_keys.update(Package.metadata_keys)
1679
1680 from portage.cache.mappings import slot_dict_class
1681 _PackageMetadataWrapperBase = slot_dict_class(_all_metadata_keys)
1682
1683 class _PackageMetadataWrapper(_PackageMetadataWrapperBase):
1684         """
1685         Detect metadata updates and synchronize Package attributes.
1686         """
1687
1688         __slots__ = ("_pkg",)
1689         _wrapped_keys = frozenset(
1690                 ["COUNTER", "INHERITED", "IUSE", "SLOT", "USE", "_mtime_"])
1691
1692         def __init__(self, pkg, metadata):
1693                 _PackageMetadataWrapperBase.__init__(self)
1694                 self._pkg = pkg
1695                 self.update(metadata)
1696
1697         def __setitem__(self, k, v):
1698                 _PackageMetadataWrapperBase.__setitem__(self, k, v)
1699                 if k in self._wrapped_keys:
1700                         getattr(self, "_set_" + k.lower())(k, v)
1701
1702         def _set_inherited(self, k, v):
1703                 if isinstance(v, basestring):
1704                         v = frozenset(v.split())
1705                 self._pkg.inherited = v
1706
1707         def _set_iuse(self, k, v):
1708                 self._pkg.iuse = self._pkg._iuse(
1709                         v.split(), self._pkg.root_config.iuse_implicit)
1710
1711         def _set_slot(self, k, v):
1712                 self._pkg.slot = v
1713
1714         def _set_use(self, k, v):
1715                 self._pkg.use = self._pkg._use(v.split())
1716
1717         def _set_counter(self, k, v):
1718                 if isinstance(v, basestring):
1719                         try:
1720                                 v = long(v.strip())
1721                         except ValueError:
1722                                 v = 0
1723                 self._pkg.counter = v
1724
1725         def _set__mtime_(self, k, v):
1726                 if isinstance(v, basestring):
1727                         try:
1728                                 v = long(v.strip())
1729                         except ValueError:
1730                                 v = 0
1731                 self._pkg.mtime = v
1732
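# Illustrative sketch: assigning to one of the wrapped metadata keys through
# a Package's metadata dict keeps the corresponding Package attribute in
# sync.  The pkg argument is assumed to be an existing Package instance and
# the helper name is hypothetical.
def _sketch_metadata_sync(pkg):
        """Hypothetical example: updating COUNTER through the metadata
        wrapper also updates pkg.counter (converted to a long)."""
        pkg.metadata["COUNTER"] = "42"
        return pkg.counter
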
1733 class EbuildFetchonly(SlotObject):
1734
1735         __slots__ = ("fetch_all", "pkg", "pretend", "settings")
1736
1737         def execute(self):
1738                 settings = self.settings
1739                 pkg = self.pkg
1740                 portdb = pkg.root_config.trees["porttree"].dbapi
1741                 ebuild_path = portdb.findname(pkg.cpv)
1742                 settings.setcpv(pkg)
1743                 debug = settings.get("PORTAGE_DEBUG") == "1"
1744                 use_cache = 1 # always true
1745                 portage.doebuild_environment(ebuild_path, "fetch",
1746                         settings["ROOT"], settings, debug, use_cache, portdb)
1747                 restrict_fetch = 'fetch' in settings['PORTAGE_RESTRICT'].split()
1748
1749                 if restrict_fetch:
1750                         rval = self._execute_with_builddir()
1751                 else:
1752                         rval = portage.doebuild(ebuild_path, "fetch",
1753                                 settings["ROOT"], settings, debug=debug,
1754                                 listonly=self.pretend, fetchonly=1, fetchall=self.fetch_all,
1755                                 mydbapi=portdb, tree="porttree")
1756
1757                         if rval != os.EX_OK:
1758                                 msg = "Fetch failed for '%s'" % (pkg.cpv,)
1759                                 eerror(msg, phase="unpack", key=pkg.cpv)
1760
1761                 return rval
1762
1763         def _execute_with_builddir(self):
1764                 # To spawn pkg_nofetch requires PORTAGE_BUILDDIR for
1765                 # ensuring sane $PWD (bug #239560) and storing elog
1766                 # messages. Use a private temp directory, in order
1767                 # to avoid locking the main one.
1768                 settings = self.settings
1769                 global_tmpdir = settings["PORTAGE_TMPDIR"]
1770                 from tempfile import mkdtemp
1771                 try:
1772                         private_tmpdir = mkdtemp("", "._portage_fetch_.", global_tmpdir)
1773                 except OSError, e:
1774                         if e.errno != portage.exception.PermissionDenied.errno:
1775                                 raise
1776                         raise portage.exception.PermissionDenied(global_tmpdir)
1777                 settings["PORTAGE_TMPDIR"] = private_tmpdir
1778                 settings.backup_changes("PORTAGE_TMPDIR")
1779                 try:
1780                         retval = self._execute()
1781                 finally:
1782                         settings["PORTAGE_TMPDIR"] = global_tmpdir
1783                         settings.backup_changes("PORTAGE_TMPDIR")
1784                         shutil.rmtree(private_tmpdir)
1785                 return retval
1786
1787         def _execute(self):
1788                 settings = self.settings
1789                 pkg = self.pkg
1790                 root_config = pkg.root_config
1791                 portdb = root_config.trees["porttree"].dbapi
1792                 ebuild_path = portdb.findname(pkg.cpv)
1793                 debug = settings.get("PORTAGE_DEBUG") == "1"
1794                 retval = portage.doebuild(ebuild_path, "fetch",
1795                         self.settings["ROOT"], self.settings, debug=debug,
1796                         listonly=self.pretend, fetchonly=1, fetchall=self.fetch_all,
1797                         mydbapi=portdb, tree="porttree")
1798
1799                 if retval != os.EX_OK:
1800                         msg = "Fetch failed for '%s'" % (pkg.cpv,)
1801                         eerror(msg, phase="unpack", key=pkg.cpv)
1802
1803                 portage.elog.elog_process(self.pkg.cpv, self.settings)
1804                 return retval
1805
1806 class PollConstants(object):
1807
1808         """
1809         Provides POLL* constants that are equivalent to those from the
1810         select module, for use by PollSelectAdapter.
1811         """
1812
1813         names = ("POLLIN", "POLLPRI", "POLLOUT", "POLLERR", "POLLHUP", "POLLNVAL")
1814         v = 1
1815         for k in names:
1816                 locals()[k] = getattr(select, k, v)
1817                 v *= 2
1818         del k, v
1819
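# Illustrative sketch: poll event masks are tested bitwise against these
# constants, whether they come from select.poll() or from the fallback
# values assigned above.  The helper name is hypothetical.
def _sketch_has_input(event):
        """Hypothetical example: True if the event mask indicates readable data."""
        return bool(event & PollConstants.POLLIN)
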
1820 class AsynchronousTask(SlotObject):
1821         """
1822         Subclasses override _wait() and _poll() so that calls
1823         to public methods can be wrapped for implementing
1824         hooks such as exit listener notification.
1825
1826         Subclasses should call self.wait() to notify exit listeners after
1827         the task is complete and self.returncode has been set.
1828         """
1829
1830         __slots__ = ("background", "cancelled", "returncode") + \
1831                 ("_exit_listeners", "_exit_listener_stack", "_start_listeners")
1832
1833         def start(self):
1834                 """
1835                 Start an asynchronous task and then return as soon as possible.
1836                 """
1837                 self._start_hook()
1838                 self._start()
1839
1840         def _start(self):
1841                 raise NotImplementedError(self)
1842
1843         def isAlive(self):
1844                 return self.returncode is None
1845
1846         def poll(self):
1847                 self._wait_hook()
1848                 return self._poll()
1849
1850         def _poll(self):
1851                 return self.returncode
1852
1853         def wait(self):
1854                 if self.returncode is None:
1855                         self._wait()
1856                 self._wait_hook()
1857                 return self.returncode
1858
1859         def _wait(self):
1860                 return self.returncode
1861
1862         def cancel(self):
1863                 self.cancelled = True
1864                 self.wait()
1865
1866         def addStartListener(self, f):
1867                 """
1868                 The function will be called with one argument, a reference to self.
1869                 """
1870                 if self._start_listeners is None:
1871                         self._start_listeners = []
1872                 self._start_listeners.append(f)
1873
1874         def removeStartListener(self, f):
1875                 if self._start_listeners is None:
1876                         return
1877                 self._start_listeners.remove(f)
1878
1879         def _start_hook(self):
1880                 if self._start_listeners is not None:
1881                         start_listeners = self._start_listeners
1882                         self._start_listeners = None
1883
1884                         for f in start_listeners:
1885                                 f(self)
1886
1887         def addExitListener(self, f):
1888                 """
1889                 The function will be called with one argument, a reference to self.
1890                 """
1891                 if self._exit_listeners is None:
1892                         self._exit_listeners = []
1893                 self._exit_listeners.append(f)
1894
1895         def removeExitListener(self, f):
1896                 if self._exit_listeners is None:
1897                         if self._exit_listener_stack is not None:
1898                                 self._exit_listener_stack.remove(f)
1899                         return
1900                 self._exit_listeners.remove(f)
1901
1902         def _wait_hook(self):
1903                 """
1904                 Call this method after the task completes, just before returning
1905                 the returncode from wait() or poll(). This hook is
1906                 used to trigger exit listeners when the returncode first
1907                 becomes available.
1908                 """
1909                 if self.returncode is not None and \
1910                         self._exit_listeners is not None:
1911
1912                         # This prevents recursion, in case one of the
1913                         # exit handlers triggers this method again by
1914                         # calling wait(). Use a stack that gives
1915                         # removeExitListener() an opportunity to consume
1916                         # listeners from the stack, before they can get
1917                         # called below. This is necessary because a call
1918                         # to one exit listener may result in a call to
1919                         # removeExitListener() for another listener on
1920                         # the stack. That listener needs to be removed
1921                         # from the stack since it would be inconsistent
1922                 to call it after it has been passed into
1923                         # removeExitListener().
1924                         self._exit_listener_stack = self._exit_listeners
1925                         self._exit_listeners = None
1926
1927                         self._exit_listener_stack.reverse()
1928                         while self._exit_listener_stack:
1929                                 self._exit_listener_stack.pop()(self)
1930
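# Illustrative sketch of a minimal AsynchronousTask subclass: it completes
# synchronously inside _start(), sets returncode, and then calls wait() so
# that registered exit listeners fire.  Both names below are hypothetical.
class _ExampleNoopTask(AsynchronousTask):

        __slots__ = ()

        def _start(self):
                self.returncode = os.EX_OK
                self.wait()

def _sketch_run_noop_task():
        """Hypothetical example: run the task and observe its exit listener."""
        results = []
        task = _ExampleNoopTask()
        task.addExitListener(lambda t: results.append(t.returncode))
        task.start()
        return task.wait(), results
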
1931 class AbstractPollTask(AsynchronousTask):
1932
1933         __slots__ = ("scheduler",) + \
1934                 ("_registered",)
1935
1936         _bufsize = 4096
1937         _exceptional_events = PollConstants.POLLERR | PollConstants.POLLNVAL
1938         _registered_events = PollConstants.POLLIN | PollConstants.POLLHUP | \
1939                 _exceptional_events
1940
1941         def _unregister(self):
1942                 raise NotImplementedError(self)
1943
1944         def _unregister_if_appropriate(self, event):
1945                 if self._registered:
1946                         if event & self._exceptional_events:
1947                                 self._unregister()
1948                                 self.cancel()
1949                         elif event & PollConstants.POLLHUP:
1950                                 self._unregister()
1951                                 self.wait()
1952
1953 class PipeReader(AbstractPollTask):
1954
1955         """
1956         Reads output from one or more files and saves it in memory,
1957         for retrieval via the getvalue() method. This is driven by
1958         the scheduler's poll() loop, so it runs entirely within the
1959         current process.
1960         """
1961
1962         __slots__ = ("input_files",) + \
1963                 ("_read_data", "_reg_ids")
1964
1965         def _start(self):
1966                 self._reg_ids = set()
1967                 self._read_data = []
1968                 for k, f in self.input_files.iteritems():
1969                         fcntl.fcntl(f.fileno(), fcntl.F_SETFL,
1970                                 fcntl.fcntl(f.fileno(), fcntl.F_GETFL) | os.O_NONBLOCK)
1971                         self._reg_ids.add(self.scheduler.register(f.fileno(),
1972                                 self._registered_events, self._output_handler))
1973                 self._registered = True
1974
1975         def isAlive(self):
1976                 return self._registered
1977
1978         def cancel(self):
1979                 if self.returncode is None:
1980                         self.returncode = 1
1981                         self.cancelled = True
1982                 self.wait()
1983
1984         def _wait(self):
1985                 if self.returncode is not None:
1986                         return self.returncode
1987
1988                 if self._registered:
1989                         self.scheduler.schedule(self._reg_ids)
1990                         self._unregister()
1991
1992                 self.returncode = os.EX_OK
1993                 return self.returncode
1994
1995         def getvalue(self):
1996                 """Retrieve the entire contents"""
1997                 if sys.hexversion >= 0x3000000:
1998                         return bytes().join(self._read_data)
1999                 return "".join(self._read_data)
2000
2001         def close(self):
2002                 """Free the memory buffer."""
2003                 self._read_data = None
2004
2005         def _output_handler(self, fd, event):
2006
2007                 if event & PollConstants.POLLIN:
2008
2009                         for f in self.input_files.itervalues():
2010                                 if fd == f.fileno():
2011                                         break
2012
2013                         buf = array.array('B')
2014                         try:
2015                                 buf.fromfile(f, self._bufsize)
2016                         except EOFError:
2017                                 pass
2018
2019                         if buf:
2020                                 self._read_data.append(buf.tostring())
2021                         else:
2022                                 self._unregister()
2023                                 self.wait()
2024
2025                 self._unregister_if_appropriate(event)
2026                 return self._registered
2027
2028         def _unregister(self):
2029                 """
2030                 Unregister from the scheduler and close open files.
2031                 """
2032
2033                 self._registered = False
2034
2035                 if self._reg_ids is not None:
2036                         for reg_id in self._reg_ids:
2037                                 self.scheduler.unregister(reg_id)
2038                         self._reg_ids = None
2039
2040                 if self.input_files is not None:
2041                         for f in self.input_files.itervalues():
2042                                 f.close()
2043                         self.input_files = None
2044
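# Illustrative sketch of PipeReader usage: given a scheduler providing the
# register()/schedule()/unregister() interface used above and an already
# opened readable file, collect everything written to it.  Both arguments
# are assumed to be supplied by the caller; the helper name is hypothetical.
def _sketch_read_pipe(scheduler, readable_file):
        """Hypothetical example: drain a pipe via the scheduler's poll loop."""
        reader = PipeReader(input_files={"input": readable_file},
                scheduler=scheduler)
        reader.start()
        reader.wait()
        data = reader.getvalue()
        reader.close()
        return data
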
2045 class CompositeTask(AsynchronousTask):
2046
2047         __slots__ = ("scheduler",) + ("_current_task",)
2048
2049         def isAlive(self):
2050                 return self._current_task is not None
2051
2052         def cancel(self):
2053                 self.cancelled = True
2054                 if self._current_task is not None:
2055                         self._current_task.cancel()
2056
2057         def _poll(self):
2058                 """
2059                 This does a loop calling self._current_task.poll()
2060                 repeatedly as long as the value of self._current_task
2061                 keeps changing. It calls poll() a maximum of one time
2062                 for a given self._current_task instance. This is useful
2063                 since calling poll() on a task can trigger advancement to
2064                 the next task, which could eventually lead to the returncode
2065                 being set in cases when polling only a single task would
2066                 not have the same effect.
2067                 """
2068
2069                 prev = None
2070                 while True:
2071                         task = self._current_task
2072                         if task is None or task is prev:
2073                                 # don't poll the same task more than once
2074                                 break
2075                         task.poll()
2076                         prev = task
2077
2078                 return self.returncode
2079
2080         def _wait(self):
2081
2082                 prev = None
2083                 while True:
2084                         task = self._current_task
2085                         if task is None:
2086                                 # all sub-tasks have completed, so there's nothing left to wait for
2087                                 break
2088                         if task is prev:
2089                                 # Before the task.wait() method returned, an exit
2090                                 # listener should have set self._current_task to either
2091                                 # a different task or None. Something is wrong.
2092                                 raise AssertionError("self._current_task has not " + \
2093                                         "changed since calling wait", self, task)
2094                         task.wait()
2095                         prev = task
2096
2097                 return self.returncode
2098
2099         def _assert_current(self, task):
2100                 """
2101                 Raises an AssertionError if the given task is not the
2102                 same one as self._current_task. This can be useful
2103                 for detecting bugs.
2104                 """
2105                 if task is not self._current_task:
2106                         raise AssertionError("Unrecognized task: %s" % (task,))
2107
2108         def _default_exit(self, task):
2109                 """
2110                 Calls _assert_current() on the given task and then sets the
2111                 composite returncode attribute if task.returncode != os.EX_OK.
2112                 If the task failed then self._current_task will be set to None.
2113                 Subclasses can use this as a generic task exit callback.
2114
2115                 @rtype: int
2116                 @returns: The task.returncode attribute.
2117                 """
2118                 self._assert_current(task)
2119                 if task.returncode != os.EX_OK:
2120                         self.returncode = task.returncode
2121                         self._current_task = None
2122                 return task.returncode
2123
2124         def _final_exit(self, task):
2125                 """
2126                 Assumes that task is the final task of this composite task.
2127                 Calls _default_exit(), sets self.returncode to the task's
2128                 returncode, and sets self._current_task to None.
2129                 """
2130                 self._default_exit(task)
2131                 self._current_task = None
2132                 self.returncode = task.returncode
2133                 return self.returncode
2134
2135         def _default_final_exit(self, task):
2136                 """
2137                 This calls _final_exit() and then wait().
2138
2139                 Subclasses can use this as a generic final task exit callback.
2140
2141                 """
2142                 self._final_exit(task)
2143                 return self.wait()
2144
2145         def _start_task(self, task, exit_handler):
2146                 """
2147                 Register exit handler for the given task, set it
2148                 as self._current_task, and call task.start().
2149
2150                 Subclasses can use this as a generic way to start
2151                 a task.
2152
2153                 """
2154                 task.addExitListener(exit_handler)
2155                 self._current_task = task
2156                 task.start()
2157
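# Illustrative sketch of the CompositeTask pattern: _start() hands a sub-task
# to _start_task(), and the generic _default_final_exit() callback propagates
# that task's returncode.  The class is hypothetical; sub_task is assumed to
# be any AsynchronousTask instance.
class _ExampleSingleTask(CompositeTask):

        __slots__ = ("sub_task",)

        def _start(self):
                self._start_task(self.sub_task, self._default_final_exit)
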
2158 class TaskSequence(CompositeTask):
2159         """
2160         A collection of tasks that executes sequentially. Each task
2161         must have an addExitListener() method that can be used as
2162         a means to trigger movement from one task to the next.
2163         """
2164
2165         __slots__ = ("_task_queue",)
2166
2167         def __init__(self, **kwargs):
2168                 AsynchronousTask.__init__(self, **kwargs)
2169                 self._task_queue = deque()
2170
2171         def add(self, task):
2172                 self._task_queue.append(task)
2173
2174         def _start(self):
2175                 self._start_next_task()
2176
2177         def cancel(self):
2178                 self._task_queue.clear()
2179                 CompositeTask.cancel(self)
2180
2181         def _start_next_task(self):
2182                 self._start_task(self._task_queue.popleft(),
2183                         self._task_exit_handler)
2184
2185         def _task_exit_handler(self, task):
2186                 if self._default_exit(task) != os.EX_OK:
2187                         self.wait()
2188                 elif self._task_queue:
2189                         self._start_next_task()
2190                 else:
2191                         self._final_exit(task)
2192                         self.wait()
2193
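# Illustrative sketch of TaskSequence usage: queue several tasks and run them
# one after another, stopping at the first failure.  The scheduler and tasks
# arguments are assumed to be supplied by the caller; the helper name is
# hypothetical.
def _sketch_run_sequence(scheduler, tasks):
        """Hypothetical example: execute tasks sequentially and return the
        returncode of the whole sequence."""
        seq = TaskSequence(scheduler=scheduler)
        for task in tasks:
                seq.add(task)
        seq.start()
        return seq.wait()
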
2194 class SubProcess(AbstractPollTask):
2195
2196         __slots__ = ("pid",) + \
2197                 ("_files", "_reg_id")
2198
2199         # A file descriptor is required for the scheduler to monitor changes from
2200         # inside a poll() loop. When logging is not enabled, create a pipe just to
2201         # serve this purpose alone.
2202         _dummy_pipe_fd = 9
2203
2204         def _poll(self):
2205                 if self.returncode is not None:
2206                         return self.returncode
2207                 if self.pid is None:
2208                         return self.returncode
2209                 if self._registered:
2210                         return self.returncode
2211
2212                 try:
2213                         retval = os.waitpid(self.pid, os.WNOHANG)
2214                 except OSError, e:
2215                         if e.errno != errno.ECHILD:
2216                                 raise
2217                         del e
2218                         retval = (self.pid, 1)
2219
2220                 if retval == (0, 0):
2221                         return None
2222                 self._set_returncode(retval)
2223                 return self.returncode
2224
2225         def cancel(self):
2226                 if self.isAlive():
2227                         try:
2228                                 os.kill(self.pid, signal.SIGTERM)
2229                         except OSError, e:
2230                                 if e.errno != errno.ESRCH:
2231                                         raise
2232                                 del e
2233
2234                 self.cancelled = True
2235                 if self.pid is not None:
2236                         self.wait()
2237                 return self.returncode
2238
2239         def isAlive(self):
2240                 return self.pid is not None and \
2241                         self.returncode is None
2242
2243         def _wait(self):
2244
2245                 if self.returncode is not None:
2246                         return self.returncode
2247
2248                 if self._registered:
2249                         self.scheduler.schedule(self._reg_id)
2250                         self._unregister()
2251                         if self.returncode is not None:
2252                                 return self.returncode
2253
2254                 try:
2255                         wait_retval = os.waitpid(self.pid, 0)
2256                 except OSError, e:
2257                         if e.errno != errno.ECHILD:
2258                                 raise
2259                         del e
2260                         self._set_returncode((self.pid, 1))
2261                 else:
2262                         self._set_returncode(wait_retval)
2263
2264                 return self.returncode
2265
2266         def _unregister(self):
2267                 """
2268                 Unregister from the scheduler and close open files.
2269                 """
2270
2271                 self._registered = False
2272
2273                 if self._reg_id is not None:
2274                         self.scheduler.unregister(self._reg_id)
2275                         self._reg_id = None
2276
2277                 if self._files is not None:
2278                         for f in self._files.itervalues():
2279                                 f.close()
2280                         self._files = None
2281
2282         def _set_returncode(self, wait_retval):
2283
2284                 retval = wait_retval[1]
2285
2286                 if retval != os.EX_OK:
2287                         if retval & 0xff:
2288                                 retval = (retval & 0xff) << 8
2289                         else:
2290                                 retval = retval >> 8
2291
2292                 self.returncode = retval
2293
2294 class SpawnProcess(SubProcess):
2295
2296         """
2297         Constructor keyword args are passed into portage.process.spawn().
2298         The required "args" keyword argument will be passed as the first
2299         spawn() argument.
2300         """
2301
2302         _spawn_kwarg_names = ("env", "opt_name", "fd_pipes",
2303                 "uid", "gid", "groups", "umask", "logfile",
2304                 "path_lookup", "pre_exec")
2305
2306         __slots__ = ("args",) + \
2307                 _spawn_kwarg_names
2308
2309         _file_names = ("log", "process", "stdout")
2310         _files_dict = slot_dict_class(_file_names, prefix="")
2311
2312         def _start(self):
2313
2314                 if self.cancelled:
2315                         return
2316
2317                 if self.fd_pipes is None:
2318                         self.fd_pipes = {}
2319                 fd_pipes = self.fd_pipes
2320                 fd_pipes.setdefault(0, sys.stdin.fileno())
2321                 fd_pipes.setdefault(1, sys.stdout.fileno())
2322                 fd_pipes.setdefault(2, sys.stderr.fileno())
2323
2324                 # flush any pending output
2325                 for fd in fd_pipes.itervalues():
2326                         if fd == sys.stdout.fileno():
2327                                 sys.stdout.flush()
2328                         if fd == sys.stderr.fileno():
2329                                 sys.stderr.flush()
2330
2331                 logfile = self.logfile
2332                 self._files = self._files_dict()
2333                 files = self._files
2334
2335                 master_fd, slave_fd = self._pipe(fd_pipes)
2336                 fcntl.fcntl(master_fd, fcntl.F_SETFL,
2337                         fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
2338
2339                 null_input = None
2340                 fd_pipes_orig = fd_pipes.copy()
2341                 if self.background:
2342                         # TODO: Use job control functions like tcsetpgrp() to control
2343                         # access to stdin. Until then, use /dev/null so that any
2344                         # attempts to read from stdin will immediately return EOF
2345                         # instead of blocking indefinitely.
2346                         null_input = open('/dev/null', 'rb')
2347                         fd_pipes[0] = null_input.fileno()
2348                 else:
2349                         fd_pipes[0] = fd_pipes_orig[0]
2350
2351                 files.process = os.fdopen(master_fd, 'rb')
2352                 if logfile is not None:
2353
2354                         fd_pipes[1] = slave_fd
2355                         fd_pipes[2] = slave_fd
2356
2357                         files.log = open(logfile, mode='ab')
2358                         portage.util.apply_secpass_permissions(logfile,
2359                                 uid=portage.portage_uid, gid=portage.portage_gid,
2360                                 mode=0660)
2361
2362                         if not self.background:
2363                                 files.stdout = os.fdopen(os.dup(fd_pipes_orig[1]), 'wb')
2364
2365                         output_handler = self._output_handler
2366
2367                 else:
2368
2369                         # Create a dummy pipe so the scheduler can monitor
2370                         # the process from inside a poll() loop.
2371                         fd_pipes[self._dummy_pipe_fd] = slave_fd
2372                         if self.background:
2373                                 fd_pipes[1] = slave_fd
2374                                 fd_pipes[2] = slave_fd
2375                         output_handler = self._dummy_handler
2376
2377                 kwargs = {}
2378                 for k in self._spawn_kwarg_names:
2379                         v = getattr(self, k)
2380                         if v is not None:
2381                                 kwargs[k] = v
2382
2383                 kwargs["fd_pipes"] = fd_pipes
2384                 kwargs["returnpid"] = True
2385                 kwargs.pop("logfile", None)
2386
2387                 self._reg_id = self.scheduler.register(files.process.fileno(),
2388                         self._registered_events, output_handler)
2389                 self._registered = True
2390
2391                 retval = self._spawn(self.args, **kwargs)
2392
2393                 os.close(slave_fd)
2394                 if null_input is not None:
2395                         null_input.close()
2396
2397                 if isinstance(retval, int):
2398                         # spawn failed
2399                         self._unregister()
2400                         self.returncode = retval
2401                         self.wait()
2402                         return
2403
2404                 self.pid = retval[0]
2405                 portage.process.spawned_pids.remove(self.pid)
2406
2407         def _pipe(self, fd_pipes):
2408                 """
2409                 @type fd_pipes: dict
2410                 @param fd_pipes: pipes from which to copy terminal size if desired.
2411                 """
2412                 return os.pipe()
2413
2414         def _spawn(self, args, **kwargs):
2415                 return portage.process.spawn(args, **kwargs)
2416
2417         def _output_handler(self, fd, event):
2418
2419                 if event & PollConstants.POLLIN:
2420
2421                         files = self._files
2422                         buf = array.array('B')
2423                         try:
2424                                 buf.fromfile(files.process, self._bufsize)
2425                         except EOFError:
2426                                 pass
2427
2428                         if buf:
2429                                 if not self.background:
2430                                         write_successful = False
2431                                         failures = 0
2432                                         while True:
2433                                                 try:
2434                                                         if not write_successful:
2435                                                                 buf.tofile(files.stdout)
2436                                                                 write_successful = True
2437                                                         files.stdout.flush()
2438                                                         break
2439                                                 except IOError, e:
2440                                                         if e.errno != errno.EAGAIN:
2441                                                                 raise
2442                                                         del e
2443                                                         failures += 1
2444                                                         if failures > 50:
2445                                                                 # Avoid a potentially infinite loop. In
2446                                                                 # most cases, the failure count is zero
2447                                                                 # and it's unlikely to exceed 1.
2448                                                                 raise
2449
2450                                                         # This means that a subprocess has put an inherited
2451                                                         # stdio file descriptor (typically stdin) into
2452                                                         # O_NONBLOCK mode. This is not acceptable (see bug
2453                                                         # #264435), so revert it. We need to use a loop
2454                                                         # here since there's a race condition due to
2455                                                         # parallel processes being able to change the
2456                                                         # flags on the inherited file descriptor.
2457                                                         # TODO: When possible, avoid having child processes
2458                                                         # inherit stdio file descriptors from portage
2459                                                         # (maybe it can't be avoided with
2460                                                         # PROPERTIES=interactive).
2461                                                         fcntl.fcntl(files.stdout.fileno(), fcntl.F_SETFL,
2462                                                                 fcntl.fcntl(files.stdout.fileno(),
2463                                                                 fcntl.F_GETFL) ^ os.O_NONBLOCK)
2464
2465                                 buf.tofile(files.log)
2466                                 files.log.flush()
2467                         else:
2468                                 self._unregister()
2469                                 self.wait()
2470
2471                 self._unregister_if_appropriate(event)
2472                 return self._registered
2473
2474         def _dummy_handler(self, fd, event):
2475                 """
2476                 This method is mainly interested in detecting EOF, since
2477                 the only purpose of the pipe is to allow the scheduler to
2478                 monitor the process from inside a poll() loop.
2479                 """
2480
2481                 if event & PollConstants.POLLIN:
2482
2483                         buf = array.array('B')
2484                         try:
2485                                 buf.fromfile(self._files.process, self._bufsize)
2486                         except EOFError:
2487                                 pass
2488
2489                         if buf:
2490                                 pass
2491                         else:
2492                                 self._unregister()
2493                                 self.wait()
2494
2495                 self._unregister_if_appropriate(event)
2496                 return self._registered
2497
2498 class MiscFunctionsProcess(SpawnProcess):
2499         """
2500         Spawns misc-functions.sh with an existing ebuild environment.
2501         """
2502
2503         __slots__ = ("commands", "phase", "pkg", "settings")
2504
2505         def _start(self):
2506                 settings = self.settings
2507                 settings.pop("EBUILD_PHASE", None)
2508                 portage_bin_path = settings["PORTAGE_BIN_PATH"]
2509                 misc_sh_binary = os.path.join(portage_bin_path,
2510                         os.path.basename(portage.const.MISC_SH_BINARY))
2511
2512                 self.args = [portage._shell_quote(misc_sh_binary)] + self.commands
2513                 self.logfile = settings.get("PORTAGE_LOG_FILE")
2514
2515                 portage._doebuild_exit_status_unlink(
2516                         settings.get("EBUILD_EXIT_STATUS_FILE"))
2517
2518                 SpawnProcess._start(self)
2519
2520         def _spawn(self, args, **kwargs):
2521                 settings = self.settings
2522                 debug = settings.get("PORTAGE_DEBUG") == "1"
2523                 return portage.spawn(" ".join(args), settings,
2524                         debug=debug, **kwargs)
2525
2526         def _set_returncode(self, wait_retval):
2527                 SpawnProcess._set_returncode(self, wait_retval)
2528                 self.returncode = portage._doebuild_exit_status_check_and_log(
2529                         self.settings, self.phase, self.returncode)
2530
2531 class EbuildFetcher(SpawnProcess):
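             """
             Spawns the ebuild(1) command to run the fetch (or fetchall)
             phase for a single package. Unless in prefetch mode, the
             fetch runs inside a locked EbuildBuildDir and logs to
             PORTAGE_LOG_FILE.
             """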
2532
2533         __slots__ = ("config_pool", "fetchonly", "fetchall", "pkg", "prefetch") + \
2534                 ("_build_dir",)
2535
2536         def _start(self):
2537
2538                 root_config = self.pkg.root_config
2539                 portdb = root_config.trees["porttree"].dbapi
2540                 ebuild_path = portdb.findname(self.pkg.cpv)
2541                 settings = self.config_pool.allocate()
2542                 settings.setcpv(self.pkg)
2543
2544                 # In prefetch mode, logging goes to emerge-fetch.log and the builddir
2545                 # should not be touched since otherwise it could interfere with
2546                 # another instance of the same cpv concurrently being built for a
2547                 # different $ROOT (currently, builds only cooperate with prefetchers
2548                 # that are spawned for the same $ROOT).
2549                 if not self.prefetch:
2550                         self._build_dir = EbuildBuildDir(pkg=self.pkg, settings=settings)
2551                         self._build_dir.lock()
2552                         self._build_dir.clean_log()
2553                         portage.prepare_build_dirs(self.pkg.root, self._build_dir.settings, 0)
2554                         if self.logfile is None:
2555                                 self.logfile = settings.get("PORTAGE_LOG_FILE")
2556
2557                 phase = "fetch"
2558                 if self.fetchall:
2559                         phase = "fetchall"
2560
2561                 # If any incremental variables have been overridden
2562                 # via the environment, those values need to be passed
2563                 # along here so that they are correctly considered by
2564                 # the config instance in the subprocess.
2565                 fetch_env = os.environ.copy()
2566
2567                 nocolor = settings.get("NOCOLOR")
2568                 if nocolor is not None:
2569                         fetch_env["NOCOLOR"] = nocolor
2570
2571                 fetch_env["PORTAGE_NICENESS"] = "0"
2572                 if self.prefetch:
2573                         fetch_env["PORTAGE_PARALLEL_FETCHONLY"] = "1"
2574
2575                 ebuild_binary = os.path.join(
2576                         settings["PORTAGE_BIN_PATH"], "ebuild")
2577
2578                 fetch_args = [ebuild_binary, ebuild_path, phase]
2579                 debug = settings.get("PORTAGE_DEBUG") == "1"
2580                 if debug:
2581                         fetch_args.append("--debug")
2582
2583                 self.args = fetch_args
2584                 self.env = fetch_env
2585                 SpawnProcess._start(self)
2586
2587         def _pipe(self, fd_pipes):
2588                 """When appropriate, use a pty so that fetcher progress bars,
2589                 such as wget's, work properly."""
2590                 if self.background or not sys.stdout.isatty():
2591                         # When the output only goes to a log file,
2592                         # there's no point in creating a pty.
2593                         return os.pipe()
2594                 stdout_pipe = fd_pipes.get(1)
2595                 got_pty, master_fd, slave_fd = \
2596                         portage._create_pty_or_pipe(copy_term_size=stdout_pipe)
2597                 return (master_fd, slave_fd)
2598
2599         def _set_returncode(self, wait_retval):
2600                 SpawnProcess._set_returncode(self, wait_retval)
2601                 # Collect elog messages that might have been
2602                 # created by the pkg_nofetch phase.
2603                 if self._build_dir is not None:
2604                         # Skip elog messages for prefetch, in order to avoid duplicates.
2605                         if not self.prefetch and self.returncode != os.EX_OK:
2606                                 elog_out = None
2607                                 if self.logfile is not None:
2608                                         if self.background:
2609                                                 elog_out = open(self.logfile, 'a')
2610                                 msg = "Fetch failed for '%s'" % (self.pkg.cpv,)
2611                                 if self.logfile is not None:
2612                                         msg += ", Log file:"
2613                                 eerror(msg, phase="unpack", key=self.pkg.cpv, out=elog_out)
2614                                 if self.logfile is not None:
2615                                         eerror(" '%s'" % (self.logfile,),
2616                                                 phase="unpack", key=self.pkg.cpv, out=elog_out)
2617                                 if elog_out is not None:
2618                                         elog_out.close()
2619                         if not self.prefetch:
2620                                 portage.elog.elog_process(self.pkg.cpv, self._build_dir.settings)
2621                         features = self._build_dir.settings.features
2622                         if self.returncode == os.EX_OK:
2623                                 self._build_dir.clean_log()
2624                         self._build_dir.unlock()
2625                         self.config_pool.deallocate(self._build_dir.settings)
2626                         self._build_dir = None
2627
2628 class EbuildBuildDir(SlotObject):
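             """
             Manages locking of a package's PORTAGE_BUILDDIR, creating the
             category directory as needed and removing it again once it is
             empty. Rough usage sketch (mirroring callers in this file):

                     build_dir = EbuildBuildDir(pkg=pkg, settings=settings)
                     build_dir.lock()
                     try:
                             ... # work inside settings["PORTAGE_BUILDDIR"]
                     finally:
                             build_dir.unlock()
             """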
2629
2630         __slots__ = ("dir_path", "pkg", "settings",
2631                 "locked", "_catdir", "_lock_obj")
2632
2633         def __init__(self, **kwargs):
2634                 SlotObject.__init__(self, **kwargs)
2635                 self.locked = False
2636
2637         def lock(self):
2638                 """
2639                 This raises an AlreadyLocked exception if lock() is called
2640                 while a lock is already held. In order to avoid this, call
2641                 unlock() or check whether the "locked" attribute is True
2642                 or False before calling lock().
2643                 """
2644                 if self._lock_obj is not None:
2645                         raise self.AlreadyLocked((self._lock_obj,))
2646
2647                 dir_path = self.dir_path
2648                 if dir_path is None:
2649                         root_config = self.pkg.root_config
2650                         portdb = root_config.trees["porttree"].dbapi
2651                         ebuild_path = portdb.findname(self.pkg.cpv)
2652                         settings = self.settings
2653                         settings.setcpv(self.pkg)
2654                         debug = settings.get("PORTAGE_DEBUG") == "1"
2655                         use_cache = 1 # always true
2656                         portage.doebuild_environment(ebuild_path, "setup", root_config.root,
2657                                 self.settings, debug, use_cache, portdb)
2658                         dir_path = self.settings["PORTAGE_BUILDDIR"]
2659
2660                 catdir = os.path.dirname(dir_path)
2661                 self._catdir = catdir
2662
2663                 portage.util.ensure_dirs(os.path.dirname(catdir),
2664                         gid=portage.portage_gid,
2665                         mode=070, mask=0)
2666                 catdir_lock = None
2667                 try:
2668                         catdir_lock = portage.locks.lockdir(catdir)
2669                         portage.util.ensure_dirs(catdir,
2670                                 gid=portage.portage_gid,
2671                                 mode=070, mask=0)
2672                         self._lock_obj = portage.locks.lockdir(dir_path)
2673                 finally:
2674                         self.locked = self._lock_obj is not None
2675                         if catdir_lock is not None:
2676                                 portage.locks.unlockdir(catdir_lock)
2677
2678         def clean_log(self):
2679                 """Discard existing log."""
2680                 settings = self.settings
2681
2682                 for x in ('.logid', 'temp/build.log'):
2683                         try:
2684                                 os.unlink(os.path.join(settings["PORTAGE_BUILDDIR"], x))
2685                         except OSError:
2686                                 pass
2687
2688         def unlock(self):
2689                 if self._lock_obj is None:
2690                         return
2691
2692                 portage.locks.unlockdir(self._lock_obj)
2693                 self._lock_obj = None
2694                 self.locked = False
2695
2696                 catdir = self._catdir
2697                 catdir_lock = None
2698                 try:
2699                         catdir_lock = portage.locks.lockdir(catdir)
2700                 finally:
2701                         if catdir_lock:
2702                                 try:
2703                                         os.rmdir(catdir)
2704                                 except OSError, e:
2705                                         if e.errno not in (errno.ENOENT,
2706                                                 errno.ENOTEMPTY, errno.EEXIST):
2707                                                 raise
2708                                         del e
2709                                 portage.locks.unlockdir(catdir_lock)
2710
2711         class AlreadyLocked(portage.exception.PortageException):
2712                 pass
2713
2714 class EbuildBuild(CompositeTask):
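             """
             Composite task that builds a single package from source: waits
             for an optional prefetcher, fetches distfiles, runs the build
             phases via EbuildExecuter, optionally creates a binary package,
             and merges the result when install() is called.
             """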
2715
2716         __slots__ = ("args_set", "config_pool", "find_blockers",
2717                 "ldpath_mtimes", "logger", "opts", "pkg", "pkg_count",
2718                 "prefetcher", "settings", "world_atom") + \
2719                 ("_build_dir", "_buildpkg", "_ebuild_path", "_issyspkg", "_tree")
2720
2721         def _start(self):
2722
2723                 logger = self.logger
2724                 opts = self.opts
2725                 pkg = self.pkg
2726                 settings = self.settings
2727                 world_atom = self.world_atom
2728                 root_config = pkg.root_config
2729                 tree = "porttree"
2730                 self._tree = tree
2731                 portdb = root_config.trees[tree].dbapi
2732                 settings.setcpv(pkg)
2733                 settings.configdict["pkg"]["EMERGE_FROM"] = pkg.type_name
2734                 ebuild_path = portdb.findname(self.pkg.cpv)
2735                 self._ebuild_path = ebuild_path
2736
2737                 prefetcher = self.prefetcher
2738                 if prefetcher is None:
2739                         pass
2740                 elif not prefetcher.isAlive():
2741                         prefetcher.cancel()
2742                 elif prefetcher.poll() is None:
2743
2744                         waiting_msg = "Fetching files " + \
2745                                 "in the background. " + \
2746                                 "To view fetch progress, run `tail -f " + \
2747                                 "/var/log/emerge-fetch.log` in another " + \
2748                                 "terminal."
2749                         msg_prefix = colorize("GOOD", " * ")
2750                         from textwrap import wrap
2751                         waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
2752                                 for line in wrap(waiting_msg, 65))
2753                         if not self.background:
2754                                 writemsg(waiting_msg, noiselevel=-1)
2755
2756                         self._current_task = prefetcher
2757                         prefetcher.addExitListener(self._prefetch_exit)
2758                         return
2759
2760                 self._prefetch_exit(prefetcher)
2761
2762         def _prefetch_exit(self, prefetcher):
2763
2764                 opts = self.opts
2765                 pkg = self.pkg
2766                 settings = self.settings
2767
2768                 if opts.fetchonly:
2769                         fetcher = EbuildFetchonly(
2770                                 fetch_all=opts.fetch_all_uri,
2771                                 pkg=pkg, pretend=opts.pretend,
2772                                 settings=settings)
2773                         retval = fetcher.execute()
2774                         self.returncode = retval
2775                         self.wait()
2776                         return
2777
2778                 fetcher = EbuildFetcher(config_pool=self.config_pool,
2779                         fetchall=opts.fetch_all_uri,
2780                         fetchonly=opts.fetchonly,
2781                         background=self.background,
2782                         pkg=pkg, scheduler=self.scheduler)
2783
2784                 self._start_task(fetcher, self._fetch_exit)
2785
2786         def _fetch_exit(self, fetcher):
2787                 opts = self.opts
2788                 pkg = self.pkg
2789
2790                 fetch_failed = False
2791                 if opts.fetchonly:
2792                         fetch_failed = self._final_exit(fetcher) != os.EX_OK
2793                 else:
2794                         fetch_failed = self._default_exit(fetcher) != os.EX_OK
2795
2796                 if fetch_failed and fetcher.logfile is not None and \
2797                         os.path.exists(fetcher.logfile):
2798                         self.settings["PORTAGE_LOG_FILE"] = fetcher.logfile
2799
2800                 if not fetch_failed and fetcher.logfile is not None:
2801                         # Fetch was successful, so remove the fetch log.
2802                         try:
2803                                 os.unlink(fetcher.logfile)
2804                         except OSError:
2805                                 pass
2806
2807                 if fetch_failed or opts.fetchonly:
2808                         self.wait()
2809                         return
2810
2811                 logger = self.logger
2812                 opts = self.opts
2813                 pkg_count = self.pkg_count
2814                 scheduler = self.scheduler
2815                 settings = self.settings
2816                 features = settings.features
2817                 ebuild_path = self._ebuild_path
2818                 system_set = pkg.root_config.sets["system"]
2819
2820                 self._build_dir = EbuildBuildDir(pkg=pkg, settings=settings)
2821                 self._build_dir.lock()
2822
2823                 # Cleaning is triggered before the setup
2824                 # phase, in portage.doebuild().
2825                 msg = " === (%s of %s) Cleaning (%s::%s)" % \
2826                         (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2827                 short_msg = "emerge: (%s of %s) %s Clean" % \
2828                         (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2829                 logger.log(msg, short_msg=short_msg)
2830
2831                 #buildsyspkg: Check if we need to _force_ binary package creation
2832                 self._issyspkg = "buildsyspkg" in features and \
2833                                 system_set.findAtomForPackage(pkg) and \
2834                                 not opts.buildpkg
2835
2836                 if opts.buildpkg or self._issyspkg:
2837
2838                         self._buildpkg = True
2839
2840                         msg = " === (%s of %s) Compiling/Packaging (%s::%s)" % \
2841                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2842                         short_msg = "emerge: (%s of %s) %s Compile" % \
2843                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2844                         logger.log(msg, short_msg=short_msg)
2845
2846                 else:
2847                         msg = " === (%s of %s) Compiling/Merging (%s::%s)" % \
2848                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2849                         short_msg = "emerge: (%s of %s) %s Compile" % \
2850                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2851                         logger.log(msg, short_msg=short_msg)
2852
2853                 build = EbuildExecuter(background=self.background, pkg=pkg,
2854                         scheduler=scheduler, settings=settings)
2855                 self._start_task(build, self._build_exit)
2856
2857         def _unlock_builddir(self):
2858                 portage.elog.elog_process(self.pkg.cpv, self.settings)
2859                 self._build_dir.unlock()
2860
2861         def _build_exit(self, build):
2862                 if self._default_exit(build) != os.EX_OK:
2863                         self._unlock_builddir()
2864                         self.wait()
2865                         return
2866
2867                 opts = self.opts
2868                 buildpkg = self._buildpkg
2869
2870                 if not buildpkg:
2871                         self._final_exit(build)
2872                         self.wait()
2873                         return
2874
2875                 if self._issyspkg:
2876                         msg = ">>> This is a system package, " + \
2877                                 "let's pack a rescue tarball.\n"
2878
2879                         log_path = self.settings.get("PORTAGE_LOG_FILE")
2880                         if log_path is not None:
2881                                 log_file = open(log_path, 'a')
2882                                 try:
2883                                         log_file.write(msg)
2884                                 finally:
2885                                         log_file.close()
2886
2887                         if not self.background:
2888                                 portage.writemsg_stdout(msg, noiselevel=-1)
2889
2890                 packager = EbuildBinpkg(background=self.background, pkg=self.pkg,
2891                         scheduler=self.scheduler, settings=self.settings)
2892
2893                 self._start_task(packager, self._buildpkg_exit)
2894
2895         def _buildpkg_exit(self, packager):
2896                 """
2897                 Release the build dir lock when there is a failure or
2898                 when in buildpkgonly mode. Otherwise, the lock will
2899                 be released when merge() is called.
2900                 """
2901
2902                 if self._default_exit(packager) != os.EX_OK:
2903                         self._unlock_builddir()
2904                         self.wait()
2905                         return
2906
2907                 if self.opts.buildpkgonly:
2908                         # Need to call "clean" phase for buildpkgonly mode
2909                         portage.elog.elog_process(self.pkg.cpv, self.settings)
2910                         phase = "clean"
2911                         clean_phase = EbuildPhase(background=self.background,
2912                                 pkg=self.pkg, phase=phase,
2913                                 scheduler=self.scheduler, settings=self.settings,
2914                                 tree=self._tree)
2915                         self._start_task(clean_phase, self._clean_exit)
2916                         return
2917
2918                 # Continue holding the builddir lock until
2919                 # after the package has been installed.
2920                 self._current_task = None
2921                 self.returncode = packager.returncode
2922                 self.wait()
2923
2924         def _clean_exit(self, clean_phase):
2925                 if self._final_exit(clean_phase) != os.EX_OK or \
2926                         self.opts.buildpkgonly:
2927                         self._unlock_builddir()
2928                 self.wait()
2929
2930         def install(self):
2931                 """
2932                 Install the package and then clean up and release locks.
2933                 Only call this after the build has completed successfully
2934                 and neither fetchonly nor buildpkgonly mode is enabled.
2935                 """
2936
2937                 find_blockers = self.find_blockers
2938                 ldpath_mtimes = self.ldpath_mtimes
2939                 logger = self.logger
2940                 pkg = self.pkg
2941                 pkg_count = self.pkg_count
2942                 settings = self.settings
2943                 world_atom = self.world_atom
2944                 ebuild_path = self._ebuild_path
2945                 tree = self._tree
2946
2947                 merge = EbuildMerge(find_blockers=self.find_blockers,
2948                         ldpath_mtimes=ldpath_mtimes, logger=logger, pkg=pkg,
2949                         pkg_count=pkg_count, pkg_path=ebuild_path,
2950                         scheduler=self.scheduler,
2951                         settings=settings, tree=tree, world_atom=world_atom)
2952
2953                 msg = " === (%s of %s) Merging (%s::%s)" % \
2954                         (pkg_count.curval, pkg_count.maxval,
2955                         pkg.cpv, ebuild_path)
2956                 short_msg = "emerge: (%s of %s) %s Merge" % \
2957                         (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2958                 logger.log(msg, short_msg=short_msg)
2959
2960                 try:
2961                         rval = merge.execute()
2962                 finally:
2963                         self._unlock_builddir()
2964
2965                 return rval
2966
2967 class EbuildExecuter(CompositeTask):
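             """
             Runs the clean, setup, unpack and remaining source build phases
             of an ebuild in order, letting the scheduler serialize the setup
             phase and, for live ebuilds, the unpack phase ($DISTDIR access).
             """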
2968
2969         __slots__ = ("pkg", "scheduler", "settings") + ("_tree",)
2970
2971         _phases = ("prepare", "configure", "compile", "test", "install")
2972
2973         _live_eclasses = frozenset([
2974                 "bzr",
2975                 "cvs",
2976                 "darcs",
2977                 "git",
2978                 "mercurial",
2979                 "subversion"
2980         ])
2981
2982         def _start(self):
2983                 self._tree = "porttree"
2984                 pkg = self.pkg
2985                 phase = "clean"
2986                 clean_phase = EbuildPhase(background=self.background, pkg=pkg, phase=phase,
2987                         scheduler=self.scheduler, settings=self.settings, tree=self._tree)
2988                 self._start_task(clean_phase, self._clean_phase_exit)
2989
2990         def _clean_phase_exit(self, clean_phase):
2991
2992                 if self._default_exit(clean_phase) != os.EX_OK:
2993                         self.wait()
2994                         return
2995
2996                 pkg = self.pkg
2997                 scheduler = self.scheduler
2998                 settings = self.settings
2999                 cleanup = 1
3000
3001                 # This initializes PORTAGE_LOG_FILE.
3002                 portage.prepare_build_dirs(pkg.root, settings, cleanup)
3003
3004                 setup_phase = EbuildPhase(background=self.background,
3005                         pkg=pkg, phase="setup", scheduler=scheduler,
3006                         settings=settings, tree=self._tree)
3007
3008                 setup_phase.addExitListener(self._setup_exit)
3009                 self._current_task = setup_phase
3010                 self.scheduler.scheduleSetup(setup_phase)
3011
3012         def _setup_exit(self, setup_phase):
3013
3014                 if self._default_exit(setup_phase) != os.EX_OK:
3015                         self.wait()
3016                         return
3017
3018                 unpack_phase = EbuildPhase(background=self.background,
3019                         pkg=self.pkg, phase="unpack", scheduler=self.scheduler,
3020                         settings=self.settings, tree=self._tree)
3021
3022                 if self._live_eclasses.intersection(self.pkg.inherited):
3023                         # Serialize $DISTDIR access for live ebuilds since
3024                         # otherwise they can interfere with each other.
3025
3026                         unpack_phase.addExitListener(self._unpack_exit)
3027                         self._current_task = unpack_phase
3028                         self.scheduler.scheduleUnpack(unpack_phase)
3029
3030                 else:
3031                         self._start_task(unpack_phase, self._unpack_exit)
3032
3033         def _unpack_exit(self, unpack_phase):
3034
3035                 if self._default_exit(unpack_phase) != os.EX_OK:
3036                         self.wait()
3037                         return
3038
3039                 ebuild_phases = TaskSequence(scheduler=self.scheduler)
3040
3041                 pkg = self.pkg
3042                 phases = self._phases
3043                 eapi = pkg.metadata["EAPI"]
3044                 if eapi in ("0", "1"):
3045                         # skip src_prepare and src_configure
3046                         phases = phases[2:]
3047
3048                 for phase in phases:
3049                         ebuild_phases.add(EbuildPhase(background=self.background,
3050                                 pkg=self.pkg, phase=phase, scheduler=self.scheduler,
3051                                 settings=self.settings, tree=self._tree))
3052
3053                 self._start_task(ebuild_phases, self._default_final_exit)
3054
3055 class EbuildMetadataPhase(SubProcess):
3056
3057         """
3058         Asynchronous interface for the ebuild "depend" phase which is
3059         used to extract metadata from the ebuild.
3060         """
3061
3062         __slots__ = ("cpv", "ebuild_path", "fd_pipes", "metadata_callback",
3063                 "ebuild_mtime", "metadata", "portdb", "repo_path", "settings") + \
3064                 ("_raw_metadata",)
3065
3066         _file_names = ("ebuild",)
3067         _files_dict = slot_dict_class(_file_names, prefix="")
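             # The spawned ebuild process writes its raw metadata to this
             # file descriptor; _set_returncode() expects one line per key
             # in portage.auxdbkeys.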
3068         _metadata_fd = 9
3069
3070         def _start(self):
3071                 settings = self.settings
3072                 settings.setcpv(self.cpv)
3073                 ebuild_path = self.ebuild_path
3074
3075                 eapi = None
3076                 if 'parse-eapi-glep-55' in settings.features:
3077                         pf, eapi = portage._split_ebuild_name_glep55(
3078                                 os.path.basename(ebuild_path))
3079                 if eapi is None and \
3080                         'parse-eapi-ebuild-head' in settings.features:
3081                         eapi = portage._parse_eapi_ebuild_head(codecs.open(ebuild_path,
3082                                 mode='r', encoding='utf_8', errors='replace'))
3083
3084                 if eapi is not None:
3085                         if not portage.eapi_is_supported(eapi):
3086                                 self.metadata_callback(self.cpv, self.ebuild_path,
3087                                         self.repo_path, {'EAPI' : eapi}, self.ebuild_mtime)
3088                                 self.returncode = os.EX_OK
3089                                 self.wait()
3090                                 return
3091
3092                         settings.configdict['pkg']['EAPI'] = eapi
3093
3094                 debug = settings.get("PORTAGE_DEBUG") == "1"
3095                 master_fd = None
3096                 slave_fd = None
3097                 fd_pipes = None
3098                 if self.fd_pipes is not None:
3099                         fd_pipes = self.fd_pipes.copy()
3100                 else:
3101                         fd_pipes = {}
3102
3103                 fd_pipes.setdefault(0, sys.stdin.fileno())
3104                 fd_pipes.setdefault(1, sys.stdout.fileno())
3105                 fd_pipes.setdefault(2, sys.stderr.fileno())
3106
3107                 # flush any pending output
3108                 for fd in fd_pipes.itervalues():
3109                         if fd == sys.stdout.fileno():
3110                                 sys.stdout.flush()
3111                         if fd == sys.stderr.fileno():
3112                                 sys.stderr.flush()
3113
3114                 fd_pipes_orig = fd_pipes.copy()
3115                 self._files = self._files_dict()
3116                 files = self._files
3117
3118                 master_fd, slave_fd = os.pipe()
3119                 fcntl.fcntl(master_fd, fcntl.F_SETFL,
3120                         fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
3121
3122                 fd_pipes[self._metadata_fd] = slave_fd
3123
3124                 self._raw_metadata = []
3125                 files.ebuild = os.fdopen(master_fd, 'r')
3126                 self._reg_id = self.scheduler.register(files.ebuild.fileno(),
3127                         self._registered_events, self._output_handler)
3128                 self._registered = True
3129
3130                 retval = portage.doebuild(ebuild_path, "depend",
3131                         settings["ROOT"], settings, debug,
3132                         mydbapi=self.portdb, tree="porttree",
3133                         fd_pipes=fd_pipes, returnpid=True)
3134
3135                 os.close(slave_fd)
3136
3137                 if isinstance(retval, int):
3138                         # doebuild failed before spawning
3139                         self._unregister()
3140                         self.returncode = retval
3141                         self.wait()
3142                         return
3143
3144                 self.pid = retval[0]
3145                 portage.process.spawned_pids.remove(self.pid)
3146
3147         def _output_handler(self, fd, event):
3148
3149                 if event & PollConstants.POLLIN:
3150                         self._raw_metadata.append(self._files.ebuild.read())
3151                         if not self._raw_metadata[-1]:
3152                                 self._unregister()
3153                                 self.wait()
3154
3155                 self._unregister_if_appropriate(event)
3156                 return self._registered
3157
3158         def _set_returncode(self, wait_retval):
3159                 SubProcess._set_returncode(self, wait_retval)
3160                 if self.returncode == os.EX_OK:
3161                         metadata_lines = "".join(self._raw_metadata).splitlines()
3162                         if len(portage.auxdbkeys) != len(metadata_lines):
3163                                 # Don't trust bash's returncode if the
3164                                 # number of lines is incorrect.
3165                                 self.returncode = 1
3166                         else:
3167                                 metadata = izip(portage.auxdbkeys, metadata_lines)
3168                                 self.metadata = self.metadata_callback(self.cpv,
3169                                         self.ebuild_path, self.repo_path, metadata,
3170                                         self.ebuild_mtime)
3171
3172 class EbuildProcess(SpawnProcess):
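             """
             Spawns a single ebuild phase via portage.doebuild(), using a pty
             when possible so that output can be captured and logged.
             """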
3173
3174         __slots__ = ("phase", "pkg", "settings", "tree")
3175
3176         def _start(self):
3177                 # Don't open the log file during the clean phase since the
3178                 # open file can result in an NFS lock on $T/build.log which
3179                 # prevents the clean phase from removing $T.
3180                 if self.phase not in ("clean", "cleanrm"):
3181                         self.logfile = self.settings.get("PORTAGE_LOG_FILE")
3182                 SpawnProcess._start(self)
3183
3184         def _pipe(self, fd_pipes):
3185                 stdout_pipe = fd_pipes.get(1)
3186                 got_pty, master_fd, slave_fd = \
3187                         portage._create_pty_or_pipe(copy_term_size=stdout_pipe)
3188                 return (master_fd, slave_fd)
3189
3190         def _spawn(self, args, **kwargs):
3191
3192                 root_config = self.pkg.root_config
3193                 tree = self.tree
3194                 mydbapi = root_config.trees[tree].dbapi
3195                 settings = self.settings
3196                 ebuild_path = settings["EBUILD"]
3197                 debug = settings.get("PORTAGE_DEBUG") == "1"
3198
3199                 rval = portage.doebuild(ebuild_path, self.phase,
3200                         root_config.root, settings, debug,
3201                         mydbapi=mydbapi, tree=tree, **kwargs)
3202
3203                 return rval
3204
3205         def _set_returncode(self, wait_retval):
3206                 SpawnProcess._set_returncode(self, wait_retval)
3207
3208                 if self.phase not in ("clean", "cleanrm"):
3209                         self.returncode = portage._doebuild_exit_status_check_and_log(
3210                                 self.settings, self.phase, self.returncode)
3211
3212                 if self.phase == "test" and self.returncode != os.EX_OK and \
3213                         "test-fail-continue" in self.settings.features:
3214                         self.returncode = os.EX_OK
3215
3216                 portage._post_phase_userpriv_perms(self.settings)
3217
3218 class EbuildPhase(CompositeTask):
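             """
             Wraps a single EbuildProcess and, after the phase itself
             succeeds, runs any post-phase commands configured in
             portage._post_phase_cmds via MiscFunctionsProcess.
             """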
3219
3220         __slots__ = ("background", "pkg", "phase",
3221                 "scheduler", "settings", "tree")
3222
3223         _post_phase_cmds = portage._post_phase_cmds
3224
3225         def _start(self):
3226
3227                 ebuild_process = EbuildProcess(background=self.background,
3228                         pkg=self.pkg, phase=self.phase, scheduler=self.scheduler,
3229                         settings=self.settings, tree=self.tree)
3230
3231                 self._start_task(ebuild_process, self._ebuild_exit)
3232
3233         def _ebuild_exit(self, ebuild_process):
3234
3235                 if self.phase == "install":
3236                         out = None
3237                         log_path = self.settings.get("PORTAGE_LOG_FILE")
3238                         log_file = None
3239                         if self.background and log_path is not None:
3240                                 log_file = open(log_path, 'a')
3241                                 out = log_file
3242                         try:
3243                                 portage._check_build_log(self.settings, out=out)
3244                         finally:
3245                                 if log_file is not None:
3246                                         log_file.close()
3247
3248                 if self._default_exit(ebuild_process) != os.EX_OK:
3249                         self.wait()
3250                         return
3251
3252                 settings = self.settings
3253
3254                 if self.phase == "install":
3255                         portage._post_src_install_chost_fix(settings)
3256                         portage._post_src_install_uid_fix(settings)
3257
3258                 post_phase_cmds = self._post_phase_cmds.get(self.phase)
3259                 if post_phase_cmds is not None:
3260                         post_phase = MiscFunctionsProcess(background=self.background,
3261                                 commands=post_phase_cmds, phase=self.phase, pkg=self.pkg,
3262                                 scheduler=self.scheduler, settings=settings)
3263                         self._start_task(post_phase, self._post_phase_exit)
3264                         return
3265
3266                 self.returncode = ebuild_process.returncode
3267                 self._current_task = None
3268                 self.wait()
3269
3270         def _post_phase_exit(self, post_phase):
3271                 if self._final_exit(post_phase) != os.EX_OK:
3272                         writemsg("!!! post %s failed; exiting.\n" % self.phase,
3273                                 noiselevel=-1)
3274                 self._current_task = None
3275                 self.wait()
3276                 return
3277
3278 class EbuildBinpkg(EbuildProcess):
3279         """
3280         This assumes that src_install() has successfully completed.
3281         """
3282         __slots__ = ("_binpkg_tmpfile",)
3283
3284         def _start(self):
3285                 self.phase = "package"
3286                 self.tree = "porttree"
3287                 pkg = self.pkg
3288                 root_config = pkg.root_config
3289                 portdb = root_config.trees["porttree"].dbapi
3290                 bintree = root_config.trees["bintree"]
3291                 ebuild_path = portdb.findname(self.pkg.cpv)
3292                 settings = self.settings
3293                 debug = settings.get("PORTAGE_DEBUG") == "1"
3294
3295                 bintree.prevent_collision(pkg.cpv)
3296                 binpkg_tmpfile = os.path.join(bintree.pkgdir,
3297                         pkg.cpv + ".tbz2." + str(os.getpid()))
3298                 self._binpkg_tmpfile = binpkg_tmpfile
3299                 settings["PORTAGE_BINPKG_TMPFILE"] = binpkg_tmpfile
3300                 settings.backup_changes("PORTAGE_BINPKG_TMPFILE")
3301
3302                 try:
3303                         EbuildProcess._start(self)
3304                 finally:
3305                         settings.pop("PORTAGE_BINPKG_TMPFILE", None)
3306
3307         def _set_returncode(self, wait_retval):
3308                 EbuildProcess._set_returncode(self, wait_retval)
3309
3310                 pkg = self.pkg
3311                 bintree = pkg.root_config.trees["bintree"]
3312                 binpkg_tmpfile = self._binpkg_tmpfile
3313                 if self.returncode == os.EX_OK:
3314                         bintree.inject(pkg.cpv, filename=binpkg_tmpfile)
3315
3316 class EbuildMerge(SlotObject):
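             """
             Synchronously merges a built package image into the vartree via
             portage.merge(), invoking the world_atom callback and logging
             the result on success.
             """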
3317
3318         __slots__ = ("find_blockers", "logger", "ldpath_mtimes",
3319                 "pkg", "pkg_count", "pkg_path", "pretend",
3320                 "scheduler", "settings", "tree", "world_atom")
3321
3322         def execute(self):
3323                 root_config = self.pkg.root_config
3324                 settings = self.settings
3325                 retval = portage.merge(settings["CATEGORY"],
3326                         settings["PF"], settings["D"],
3327                         os.path.join(settings["PORTAGE_BUILDDIR"],
3328                         "build-info"), root_config.root, settings,
3329                         myebuild=settings["EBUILD"],
3330                         mytree=self.tree, mydbapi=root_config.trees[self.tree].dbapi,
3331                         vartree=root_config.trees["vartree"],
3332                         prev_mtimes=self.ldpath_mtimes,
3333                         scheduler=self.scheduler,
3334                         blockers=self.find_blockers)
3335
3336                 if retval == os.EX_OK:
3337                         self.world_atom(self.pkg)
3338                         self._log_success()
3339
3340                 return retval
3341
3342         def _log_success(self):
3343                 pkg = self.pkg
3344                 pkg_count = self.pkg_count
3345                 pkg_path = self.pkg_path
3346                 logger = self.logger
3347                 if "noclean" not in self.settings.features:
3348                         short_msg = "emerge: (%s of %s) %s Clean Post" % \
3349                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3350                         logger.log((" === (%s of %s) " + \
3351                                 "Post-Build Cleaning (%s::%s)") % \
3352                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path),
3353                                 short_msg=short_msg)
3354                 logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
3355                         (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
3356
3357 class PackageUninstall(AsynchronousTask):
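             """
             Uninstalls a single installed package by calling unmerge(),
             converting an UninstallFailure into the task's returncode.
             """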
3358
3359         __slots__ = ("ldpath_mtimes", "opts", "pkg", "scheduler", "settings")
3360
3361         def _start(self):
3362                 try:
3363                         unmerge(self.pkg.root_config, self.opts, "unmerge",
3364                                 [self.pkg.cpv], self.ldpath_mtimes, clean_world=0,
3365                                 clean_delay=0, raise_on_error=1, scheduler=self.scheduler,
3366                                 writemsg_level=self._writemsg_level)
3367                 except UninstallFailure, e:
3368                         self.returncode = e.status
3369                 else:
3370                         self.returncode = os.EX_OK
3371                 self.wait()
3372
3373         def _writemsg_level(self, msg, level=0, noiselevel=0):
3374
3375                 log_path = self.settings.get("PORTAGE_LOG_FILE")
3376                 background = self.background
3377
3378                 if log_path is None:
3379                         if not (background and level < logging.WARNING):
3380                                 portage.util.writemsg_level(msg,
3381                                         level=level, noiselevel=noiselevel)
3382                 else:
3383                         if not background:
3384                                 portage.util.writemsg_level(msg,
3385                                         level=level, noiselevel=noiselevel)
3386
3387                         f = open(log_path, 'a')
3388                         try:
3389                                 f.write(msg)
3390                         finally:
3391                                 f.close()
3392
3393 class Binpkg(CompositeTask):
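             """
             Composite task that installs a binary package: waits for an
             optional prefetcher, fetches and verifies the tbz2 when needed,
             extracts its contents and metadata into the build dir, and
             merges the result when install() is called.
             """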
3394
3395         __slots__ = ("find_blockers",
3396                 "ldpath_mtimes", "logger", "opts",
3397                 "pkg", "pkg_count", "prefetcher", "settings", "world_atom") + \
3398                 ("_bintree", "_build_dir", "_ebuild_path", "_fetched_pkg",
3399                 "_image_dir", "_infloc", "_pkg_path", "_tree", "_verify")
3400
3401         def _writemsg_level(self, msg, level=0, noiselevel=0):
3402
3403                 if not self.background:
3404                         portage.util.writemsg_level(msg,
3405                                 level=level, noiselevel=noiselevel)
3406
3407                 log_path = self.settings.get("PORTAGE_LOG_FILE")
3408                 if log_path is not None:
3409                         f = open(log_path, 'a')
3410                         try:
3411                                 f.write(msg)
3412                         finally:
3413                                 f.close()
3414
3415         def _start(self):
3416
3417                 pkg = self.pkg
3418                 settings = self.settings
3419                 settings.setcpv(pkg)
3420                 self._tree = "bintree"
3421                 self._bintree = self.pkg.root_config.trees[self._tree]
3422                 self._verify = not self.opts.pretend
3423
3424                 dir_path = os.path.join(settings["PORTAGE_TMPDIR"],
3425                         "portage", pkg.category, pkg.pf)
3426                 self._build_dir = EbuildBuildDir(dir_path=dir_path,
3427                         pkg=pkg, settings=settings)
3428                 self._image_dir = os.path.join(dir_path, "image")
3429                 self._infloc = os.path.join(dir_path, "build-info")
3430                 self._ebuild_path = os.path.join(self._infloc, pkg.pf + ".ebuild")
3431                 settings["EBUILD"] = self._ebuild_path
3432                 debug = settings.get("PORTAGE_DEBUG") == "1"
3433                 portage.doebuild_environment(self._ebuild_path, "setup",
3434                         settings["ROOT"], settings, debug, 1, self._bintree.dbapi)
3435                 settings.configdict["pkg"]["EMERGE_FROM"] = pkg.type_name
3436
3437                 # The prefetcher has already completed or it
3438                 # could be running now. If it's running now,
3439                 # wait for it to complete since it holds
3440                 # a lock on the file being fetched. The
3441                 # portage.locks functions are only designed
3442                 # to work between separate processes. Since
3443                 # the lock is held by the current process,
3444                 # use the scheduler and fetcher methods to
3445                 # synchronize with the fetcher.
3446                 prefetcher = self.prefetcher
3447                 if prefetcher is None:
3448                         pass
3449                 elif not prefetcher.isAlive():
3450                         prefetcher.cancel()
3451                 elif prefetcher.poll() is None:
3452
3453                         waiting_msg = ("Fetching '%s' " + \
3454                                 "in the background. " + \
3455                                 "To view fetch progress, run `tail -f " + \
3456                                 "/var/log/emerge-fetch.log` in another " + \
3457                                 "terminal.") % prefetcher.pkg_path
3458                         msg_prefix = colorize("GOOD", " * ")
3459                         from textwrap import wrap
3460                         waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
3461                                 for line in wrap(waiting_msg, 65))
3462                         if not self.background:
3463                                 writemsg(waiting_msg, noiselevel=-1)
3464
3465                         self._current_task = prefetcher
3466                         prefetcher.addExitListener(self._prefetch_exit)
3467                         return
3468
3469                 self._prefetch_exit(prefetcher)
3470
3471         def _prefetch_exit(self, prefetcher):
3472
3473                 pkg = self.pkg
3474                 pkg_count = self.pkg_count
3475                 if not (self.opts.pretend or self.opts.fetchonly):
3476                         self._build_dir.lock()
3477                         # If necessary, discard old log so that we don't
3478                         # append to it.
3479                         self._build_dir.clean_log()
3480                         # Initialize PORTAGE_LOG_FILE.
3481                         portage.prepare_build_dirs(self.settings["ROOT"], self.settings, 1)
3482                 fetcher = BinpkgFetcher(background=self.background,
3483                         logfile=self.settings.get("PORTAGE_LOG_FILE"), pkg=self.pkg,
3484                         pretend=self.opts.pretend, scheduler=self.scheduler)
3485                 pkg_path = fetcher.pkg_path
3486                 self._pkg_path = pkg_path
3487
3488                 if self.opts.getbinpkg and self._bintree.isremote(pkg.cpv):
3489
3490                         msg = " --- (%s of %s) Fetching Binary (%s::%s)" %\
3491                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
3492                         short_msg = "emerge: (%s of %s) %s Fetch" % \
3493                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3494                         self.logger.log(msg, short_msg=short_msg)
3495                         self._start_task(fetcher, self._fetcher_exit)
3496                         return
3497
3498                 self._fetcher_exit(fetcher)
3499
3500         def _fetcher_exit(self, fetcher):
3501
3502                 # The fetcher only has a returncode when
3503                 # --getbinpkg is enabled.
3504                 if fetcher.returncode is not None:
3505                         self._fetched_pkg = True
3506                         if self._default_exit(fetcher) != os.EX_OK:
3507                                 self._unlock_builddir()
3508                                 self.wait()
3509                                 return
3510
3511                 if self.opts.pretend:
3512                         self._current_task = None
3513                         self.returncode = os.EX_OK
3514                         self.wait()
3515                         return
3516
3517                 verifier = None
3518                 if self._verify:
3519                         logfile = None
3520                         if self.background:
3521                                 logfile = self.settings.get("PORTAGE_LOG_FILE")
3522                         verifier = BinpkgVerifier(background=self.background,
3523                                 logfile=logfile, pkg=self.pkg)
3524                         self._start_task(verifier, self._verifier_exit)
3525                         return
3526
3527                 self._verifier_exit(verifier)
3528
3529         def _verifier_exit(self, verifier):
3530                 if verifier is not None and \
3531                         self._default_exit(verifier) != os.EX_OK:
3532                         self._unlock_builddir()
3533                         self.wait()
3534                         return
3535
3536                 logger = self.logger
3537                 pkg = self.pkg
3538                 pkg_count = self.pkg_count
3539                 pkg_path = self._pkg_path
3540
3541                 if self._fetched_pkg:
3542                         self._bintree.inject(pkg.cpv, filename=pkg_path)
3543
3544                 if self.opts.fetchonly:
3545                         self._current_task = None
3546                         self.returncode = os.EX_OK
3547                         self.wait()
3548                         return
3549
3550                 msg = " === (%s of %s) Merging Binary (%s::%s)" % \
3551                         (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
3552                 short_msg = "emerge: (%s of %s) %s Merge Binary" % \
3553                         (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3554                 logger.log(msg, short_msg=short_msg)
3555
3556                 phase = "clean"
3557                 settings = self.settings
3558                 ebuild_phase = EbuildPhase(background=self.background,
3559                         pkg=pkg, phase=phase, scheduler=self.scheduler,
3560                         settings=settings, tree=self._tree)
3561
3562                 self._start_task(ebuild_phase, self._clean_exit)
3563
3564         def _clean_exit(self, clean_phase):
3565                 if self._default_exit(clean_phase) != os.EX_OK:
3566                         self._unlock_builddir()
3567                         self.wait()
3568                         return
3569
3570                 dir_path = self._build_dir.dir_path
3571
3572                 infloc = self._infloc
3573                 pkg = self.pkg
3574                 pkg_path = self._pkg_path
3575
3576                 dir_mode = 0755
3577                 for mydir in (dir_path, self._image_dir, infloc):
3578                         portage.util.ensure_dirs(mydir, uid=portage.data.portage_uid,
3579                                 gid=portage.data.portage_gid, mode=dir_mode)
3580
3581                 # This initializes PORTAGE_LOG_FILE.
3582                 portage.prepare_build_dirs(self.settings["ROOT"], self.settings, 1)
3583                 self._writemsg_level(">>> Extracting info\n")
3584
3585                 pkg_xpak = portage.xpak.tbz2(self._pkg_path)
3586                 check_missing_metadata = ("CATEGORY", "PF")
3587                 missing_metadata = set()
3588                 for k in check_missing_metadata:
3589                         v = pkg_xpak.getfile(k)
3590                         if not v:
3591                                 missing_metadata.add(k)
3592
3593                 pkg_xpak.unpackinfo(infloc)
3594                 for k in missing_metadata:
3595                         if k == "CATEGORY":
3596                                 v = pkg.category
3597                         elif k == "PF":
3598                                 v = pkg.pf
3599                         else:
3600                                 continue
3601
3602                         f = open(os.path.join(infloc, k), 'wb')
3603                         try:
3604                                 f.write(v + "\n")
3605                         finally:
3606                                 f.close()
3607
3608                 # Store the md5sum in the vdb.
3609                 f = open(os.path.join(infloc, "BINPKGMD5"), "w")
3610                 try:
3611                         f.write(str(portage.checksum.perform_md5(pkg_path)) + "\n")
3612                 finally:
3613                         f.close()
3614
3615                 # This gives bashrc users an opportunity to do various things
3616                 # such as removing binary packages after they're installed.
3617                 settings = self.settings
3618                 settings.setcpv(self.pkg)
3619                 settings["PORTAGE_BINPKG_FILE"] = pkg_path
3620                 settings.backup_changes("PORTAGE_BINPKG_FILE")
3621
3622                 phase = "setup"
3623                 setup_phase = EbuildPhase(background=self.background,
3624                         pkg=self.pkg, phase=phase, scheduler=self.scheduler,
3625                         settings=settings, tree=self._tree)
3626
3627                 setup_phase.addExitListener(self._setup_exit)
3628                 self._current_task = setup_phase
3629                 self.scheduler.scheduleSetup(setup_phase)
3630
3631         def _setup_exit(self, setup_phase):
3632                 if self._default_exit(setup_phase) != os.EX_OK:
3633                         self._unlock_builddir()
3634                         self.wait()
3635                         return
3636
3637                 extractor = BinpkgExtractorAsync(background=self.background,
3638                         image_dir=self._image_dir,
3639                         pkg=self.pkg, pkg_path=self._pkg_path, scheduler=self.scheduler)
3640                 self._writemsg_level(">>> Extracting %s\n" % self.pkg.cpv)
3641                 self._start_task(extractor, self._extractor_exit)
3642
3643         def _extractor_exit(self, extractor):
3644                 if self._final_exit(extractor) != os.EX_OK:
3645                         self._unlock_builddir()
3646                         writemsg("!!! Error Extracting '%s'\n" % self._pkg_path,
3647                                 noiselevel=-1)
3648                 self.wait()
3649
3650         def _unlock_builddir(self):
3651                 if self.opts.pretend or self.opts.fetchonly:
3652                         return
3653                 portage.elog.elog_process(self.pkg.cpv, self.settings)
3654                 self._build_dir.unlock()
3655
3656         def install(self):
3657
3658                 # This gives bashrc users an opportunity to do various things
3659                 # such as removing binary packages after they're installed.
3660                 settings = self.settings
3661                 settings["PORTAGE_BINPKG_FILE"] = self._pkg_path
3662                 settings.backup_changes("PORTAGE_BINPKG_FILE")
3663
3664                 merge = EbuildMerge(find_blockers=self.find_blockers,
3665                         ldpath_mtimes=self.ldpath_mtimes, logger=self.logger,
3666                         pkg=self.pkg, pkg_count=self.pkg_count,
3667                         pkg_path=self._pkg_path, scheduler=self.scheduler,
3668                         settings=settings, tree=self._tree, world_atom=self.world_atom)
3669
3670                 try:
3671                         retval = merge.execute()
3672                 finally:
3673                         settings.pop("PORTAGE_BINPKG_FILE", None)
3674                         self._unlock_builddir()
3675                 return retval
3676
3677 class BinpkgFetcher(SpawnProcess):
3678
3679         __slots__ = ("pkg", "pretend",
3680                 "locked", "pkg_path", "_lock_obj")
3681
3682         def __init__(self, **kwargs):
3683                 SpawnProcess.__init__(self, **kwargs)
3684                 pkg = self.pkg
3685                 self.pkg_path = pkg.root_config.trees["bintree"].getname(pkg.cpv)
3686
3687         def _start(self):
3688
3689                 if self.cancelled:
3690                         return
3691
3692                 pkg = self.pkg
3693                 pretend = self.pretend
3694                 bintree = pkg.root_config.trees["bintree"]
3695                 settings = bintree.settings
3696                 use_locks = "distlocks" in settings.features
3697                 pkg_path = self.pkg_path
3698
3699                 if not pretend:
3700                         portage.util.ensure_dirs(os.path.dirname(pkg_path))
3701                         if use_locks:
3702                                 self.lock()
3703                 exists = os.path.exists(pkg_path)
3704                 resume = exists and os.path.basename(pkg_path) in bintree.invalids
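                     # A partial download is resumed only if the existing file is
                     # already known to be invalid (listed in bintree.invalids);
                     # otherwise any stale file is removed below and the package
                     # is fetched from scratch.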
3705                 if not (pretend or resume):
3706                         # Remove existing file or broken symlink.
3707                         try:
3708                                 os.unlink(pkg_path)
3709                         except OSError:
3710                                 pass
3711
3712                 # urljoin doesn't work correctly with unrecognized protocols
3713                 # like sftp, so the URI is constructed manually here.
3714                 if bintree._remote_has_index:
3715                         rel_uri = bintree._remotepkgs[pkg.cpv].get("PATH")
3716                         if not rel_uri:
3717                                 rel_uri = pkg.cpv + ".tbz2"
3718                         uri = bintree._remote_base_uri.rstrip("/") + \
3719                                 "/" + rel_uri.lstrip("/")
3720                 else:
3721                         uri = settings["PORTAGE_BINHOST"].rstrip("/") + \
3722                                 "/" + pkg.pf + ".tbz2"
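                     # The resulting uri looks like
                     # "http://binhost.example.org/foo-1.0.tbz2" (the host name
                     # shown here is illustrative only; it depends on the binhost
                     # configuration).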
3723
3724                 if pretend:
3725                         portage.writemsg_stdout("\n%s\n" % uri, noiselevel=-1)
3726                         self.returncode = os.EX_OK
3727                         self.wait()
3728                         return
3729
3730                 protocol = urlparse.urlparse(uri)[0]
3731                 fcmd_prefix = "FETCHCOMMAND"
3732                 if resume:
3733                         fcmd_prefix = "RESUMECOMMAND"
3734                 fcmd = settings.get(fcmd_prefix + "_" + protocol.upper())
3735                 if not fcmd:
3736                         fcmd = settings.get(fcmd_prefix)
3737
3738                 fcmd_vars = {
3739                         "DISTDIR" : os.path.dirname(pkg_path),
3740                         "URI"     : uri,
3741                         "FILE"    : os.path.basename(pkg_path)
3742                 }
3743
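                     # fcmd is the configured fetch command template, typically a
                     # wget invocation along the lines of
                     #   wget -t 3 -T 60 --passive-ftp -O "${DISTDIR}/${FILE}" "${URI}"
                     # (shown for illustration only); varexpand() below substitutes
                     # the DISTDIR, URI and FILE placeholders defined above.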
3744                 fetch_env = dict(settings.iteritems())
3745                 fetch_args = [portage.util.varexpand(x, mydict=fcmd_vars) \
3746                         for x in shlex.split(fcmd)]
3747
3748                 if self.fd_pipes is None:
3749                         self.fd_pipes = {}
3750                 fd_pipes = self.fd_pipes
3751
3752                 # Redirect all output to stdout since some fetchers like
3753                 # wget pollute stderr (if portage detects a problem then it
3754                 # can send its own message to stderr).
3755                 fd_pipes.setdefault(0, sys.stdin.fileno())
3756                 fd_pipes.setdefault(1, sys.stdout.fileno())
3757                 fd_pipes.setdefault(2, sys.stdout.fileno())
3758
3759                 self.args = fetch_args
3760                 self.env = fetch_env
3761                 SpawnProcess._start(self)
3762
3763         def _set_returncode(self, wait_retval):
3764                 SpawnProcess._set_returncode(self, wait_retval)
3765                 if self.returncode == os.EX_OK:
3766                         # If possible, update the mtime to match the remote package if
3767                         # the fetcher didn't already do it automatically.
3768                         bintree = self.pkg.root_config.trees["bintree"]
3769                         if bintree._remote_has_index:
3770                                 remote_mtime = bintree._remotepkgs[self.pkg.cpv].get("MTIME")
3771                                 if remote_mtime is not None:
3772                                         try:
3773                                                 remote_mtime = long(remote_mtime)
3774                                         except ValueError:
3775                                                 pass
3776                                         else:
3777                                                 try:
3778                                                         local_mtime = long(os.stat(self.pkg_path).st_mtime)
3779                                                 except OSError:
3780                                                         pass
3781                                                 else:
3782                                                         if remote_mtime != local_mtime:
3783                                                                 try:
3784                                                                         os.utime(self.pkg_path,
3785                                                                                 (remote_mtime, remote_mtime))
3786                                                                 except OSError:
3787                                                                         pass
3788
3789                 if self.locked:
3790                         self.unlock()
3791
3792         def lock(self):
3793                 """
3794                 This raises an AlreadyLocked exception if lock() is called
3795                 while a lock is already held. In order to avoid this, call
3796                 unlock() or check whether the "locked" attribute is True
3797                 or False before calling lock().
3798                 """
3799                 if self._lock_obj is not None:
3800                         raise self.AlreadyLocked((self._lock_obj,))
3801
3802                 self._lock_obj = portage.locks.lockfile(
3803                         self.pkg_path, wantnewlockfile=1)
3804                 self.locked = True
3805
3806         class AlreadyLocked(portage.exception.PortageException):
3807                 pass
3808
3809         def unlock(self):
3810                 if self._lock_obj is None:
3811                         return
3812                 portage.locks.unlockfile(self._lock_obj)
3813                 self._lock_obj = None
3814                 self.locked = False
3815
3816 class BinpkgVerifier(AsynchronousTask):
3817         __slots__ = ("logfile", "pkg",)
3818
3819         def _start(self):
3820                 """
3821                 Note: Unlike a normal AsynchronousTask.start() method,
3822                 this one does all of its work synchronously. The returncode
3823                 attribute will be set before it returns.
3824                 """
3825
3826                 pkg = self.pkg
3827                 root_config = pkg.root_config
3828                 bintree = root_config.trees["bintree"]
3829                 rval = os.EX_OK
3830                 stdout_orig = sys.stdout
3831                 stderr_orig = sys.stderr
3832                 log_file = None
3833                 if self.background and self.logfile is not None:
3834                         log_file = open(self.logfile, 'a')
3835                 try:
3836                         if log_file is not None:
3837                                 sys.stdout = log_file
3838                                 sys.stderr = log_file
3839                         try:
3840                                 bintree.digestCheck(pkg)
3841                         except portage.exception.FileNotFound:
3842                                 writemsg("!!! Fetching Binary failed " + \
3843                                         "for '%s'\n" % pkg.cpv, noiselevel=-1)
3844                                 rval = 1
3845                         except portage.exception.DigestException, e:
3846                                 writemsg("\n!!! Digest verification failed:\n",
3847                                         noiselevel=-1)
3848                                 writemsg("!!! %s\n" % e.value[0],
3849                                         noiselevel=-1)
3850                                 writemsg("!!! Reason: %s\n" % e.value[1],
3851                                         noiselevel=-1)
3852                                 writemsg("!!! Got: %s\n" % e.value[2],
3853                                         noiselevel=-1)
3854                                 writemsg("!!! Expected: %s\n" % e.value[3],
3855                                         noiselevel=-1)
3856                                 rval = 1
3857                         if rval != os.EX_OK:
3858                                 pkg_path = bintree.getname(pkg.cpv)
3859                                 head, tail = os.path.split(pkg_path)
3860                                 temp_filename = portage._checksum_failure_temp_file(head, tail)
3861                                 writemsg("File renamed to '%s'\n" % (temp_filename,),
3862                                         noiselevel=-1)
3863                 finally:
3864                         sys.stdout = stdout_orig
3865                         sys.stderr = stderr_orig
3866                         if log_file is not None:
3867                                 log_file.close()
3868
3869                 self.returncode = rval
3870                 self.wait()
3871
3872 class BinpkgPrefetcher(CompositeTask):
3873
3874         __slots__ = ("pkg",) + \
3875                 ("pkg_path", "_bintree",)
3876
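             # Prefetch flow: fetch the binary package, verify its digest, and
             # inject it into the bintree so that a later merge can reuse the
             # already-verified file without refetching it.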
3877         def _start(self):
3878                 self._bintree = self.pkg.root_config.trees["bintree"]
3879                 fetcher = BinpkgFetcher(background=self.background,
3880                         logfile=self.scheduler.fetch.log_file, pkg=self.pkg,
3881                         scheduler=self.scheduler)
3882                 self.pkg_path = fetcher.pkg_path
3883                 self._start_task(fetcher, self._fetcher_exit)
3884
3885         def _fetcher_exit(self, fetcher):
3886
3887                 if self._default_exit(fetcher) != os.EX_OK:
3888                         self.wait()
3889                         return
3890
3891                 verifier = BinpkgVerifier(background=self.background,
3892                         logfile=self.scheduler.fetch.log_file, pkg=self.pkg)
3893                 self._start_task(verifier, self._verifier_exit)
3894
3895         def _verifier_exit(self, verifier):
3896                 if self._default_exit(verifier) != os.EX_OK:
3897                         self.wait()
3898                         return
3899
3900                 self._bintree.inject(self.pkg.cpv, filename=self.pkg_path)
3901
3902                 self._current_task = None
3903                 self.returncode = os.EX_OK
3904                 self.wait()
3905
3906 class BinpkgExtractorAsync(SpawnProcess):
3907
3908         __slots__ = ("image_dir", "pkg", "pkg_path")
3909
3910         _shell_binary = portage.const.BASH_BINARY
3911
3912         def _start(self):
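                     # Decompress the .tbz2 with bzip2 and pipe it into tar, which
                     # extracts the package contents into image_dir; both paths are
                     # shell-quoted because the pipeline runs through bash.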
3913                 self.args = [self._shell_binary, "-c",
3914                         "bzip2 -dqc -- %s | tar -xp -C %s -f -" % \
3915                         (portage._shell_quote(self.pkg_path),
3916                         portage._shell_quote(self.image_dir))]
3917
3918                 self.env = self.pkg.root_config.settings.environ()
3919                 SpawnProcess._start(self)
3920
3921 class MergeListItem(CompositeTask):
3922
3923         """
3924         TODO: For parallel scheduling, everything here needs asynchronous
3925         execution support (start, poll, and wait methods).
3926         """
3927
3928         __slots__ = ("args_set",
3929                 "binpkg_opts", "build_opts", "config_pool", "emerge_opts",
3930                 "find_blockers", "logger", "mtimedb", "pkg",
3931                 "pkg_count", "pkg_to_replace", "prefetcher",
3932                 "settings", "statusMessage", "world_atom") + \
3933                 ("_install_task",)
3934
3935         def _start(self):
3936
3937                 pkg = self.pkg
3938                 build_opts = self.build_opts
3939
3940                 if pkg.installed:
3941                         # uninstall, executed by self.merge()
3942                         self.returncode = os.EX_OK
3943                         self.wait()
3944                         return
3945
3946                 args_set = self.args_set
3947                 find_blockers = self.find_blockers
3948                 logger = self.logger
3949                 mtimedb = self.mtimedb
3950                 pkg_count = self.pkg_count
3951                 scheduler = self.scheduler
3952                 settings = self.settings
3953                 world_atom = self.world_atom
3954                 ldpath_mtimes = mtimedb["ldpath"]
3955
3956                 action_desc = "Emerging"
3957                 preposition = "for"
3958                 if pkg.type_name == "binary":
3959                         action_desc += " binary"
3960
3961                 if build_opts.fetchonly:
3962                         action_desc = "Fetching"
3963
3964                 msg = "%s (%s of %s) %s" % \
3965                         (action_desc,
3966                         colorize("MERGE_LIST_PROGRESS", str(pkg_count.curval)),
3967                         colorize("MERGE_LIST_PROGRESS", str(pkg_count.maxval)),
3968                         colorize("GOOD", pkg.cpv))
3969
3970                 portdb = pkg.root_config.trees["porttree"].dbapi
3971                 portdir_repo_name = portdb._repository_map.get(portdb.porttree_root)
3972                 if portdir_repo_name:
3973                         pkg_repo_name = pkg.metadata.get("repository")
3974                         if pkg_repo_name != portdir_repo_name:
3975                                 if not pkg_repo_name:
3976                                         pkg_repo_name = "unknown repo"
3977                                 msg += " from %s" % pkg_repo_name
3978
3979                 if pkg.root != "/":
3980                         msg += " %s %s" % (preposition, pkg.root)
3981
3982                 if not build_opts.pretend:
3983                         self.statusMessage(msg)
3984                         logger.log(" >>> emerge (%s of %s) %s to %s" % \
3985                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
3986
3987                 if pkg.type_name == "ebuild":
3988
3989                         build = EbuildBuild(args_set=args_set,
3990                                 background=self.background,
3991                                 config_pool=self.config_pool,
3992                                 find_blockers=find_blockers,
3993                                 ldpath_mtimes=ldpath_mtimes, logger=logger,
3994                                 opts=build_opts, pkg=pkg, pkg_count=pkg_count,
3995                                 prefetcher=self.prefetcher, scheduler=scheduler,
3996                                 settings=settings, world_atom=world_atom)
3997
3998                         self._install_task = build
3999                         self._start_task(build, self._default_final_exit)
4000                         return
4001
4002                 elif pkg.type_name == "binary":
4003
4004                         binpkg = Binpkg(background=self.background,
4005                                 find_blockers=find_blockers,
4006                                 ldpath_mtimes=ldpath_mtimes, logger=logger,
4007                                 opts=self.binpkg_opts, pkg=pkg, pkg_count=pkg_count,
4008                                 prefetcher=self.prefetcher, settings=settings,
4009                                 scheduler=scheduler, world_atom=world_atom)
4010
4011                         self._install_task = binpkg
4012                         self._start_task(binpkg, self._default_final_exit)
4013                         return
4014
4015         def _poll(self):
4016                 self._install_task.poll()
4017                 return self.returncode
4018
4019         def _wait(self):
4020                 self._install_task.wait()
4021                 return self.returncode
4022
4023         def merge(self):
4024
4025                 pkg = self.pkg
4026                 build_opts = self.build_opts
4027                 find_blockers = self.find_blockers
4028                 logger = self.logger
4029                 mtimedb = self.mtimedb
4030                 pkg_count = self.pkg_count
4031                 prefetcher = self.prefetcher
4032                 scheduler = self.scheduler
4033                 settings = self.settings
4034                 world_atom = self.world_atom
4035                 ldpath_mtimes = mtimedb["ldpath"]
4036
4037                 if pkg.installed:
4038                         if not (build_opts.buildpkgonly or \
4039                                 build_opts.fetchonly or build_opts.pretend):
4040
4041                                 uninstall = PackageUninstall(background=self.background,
4042                                         ldpath_mtimes=ldpath_mtimes, opts=self.emerge_opts,
4043                                         pkg=pkg, scheduler=scheduler, settings=settings)
4044
4045                                 uninstall.start()
4046                                 retval = uninstall.wait()
4047                                 if retval != os.EX_OK:
4048                                         return retval
4049                         return os.EX_OK
4050
4051                 if build_opts.fetchonly or \
4052                         build_opts.buildpkgonly:
4053                         return self.returncode
4054
4055                 retval = self._install_task.install()
4056                 return retval
4057
4058 class PackageMerge(AsynchronousTask):
4059         """
4060         TODO: Implement asynchronous merge so that the scheduler can
4061         run while a merge is executing.
4062         """
4063
4064         __slots__ = ("merge",)
4065
4066         def _start(self):
4067
4068                 pkg = self.merge.pkg
4069                 pkg_count = self.merge.pkg_count
4070
4071                 if pkg.installed:
4072                         action_desc = "Uninstalling"
4073                         preposition = "from"
4074                         counter_str = ""
4075                 else:
4076                         action_desc = "Installing"
4077                         preposition = "to"
4078                         counter_str = "(%s of %s) " % \
4079                                 (colorize("MERGE_LIST_PROGRESS", str(pkg_count.curval)),
4080                                 colorize("MERGE_LIST_PROGRESS", str(pkg_count.maxval)))
4081
4082                 msg = "%s %s%s" % \
4083                         (action_desc,
4084                         counter_str,
4085                         colorize("GOOD", pkg.cpv))
4086
4087                 if pkg.root != "/":
4088                         msg += " %s %s" % (preposition, pkg.root)
4089
4090                 if not self.merge.build_opts.fetchonly and \
4091                         not self.merge.build_opts.pretend and \
4092                         not self.merge.build_opts.buildpkgonly:
4093                         self.merge.statusMessage(msg)
4094
4095                 self.returncode = self.merge.merge()
4096                 self.wait()
4097
4098 class DependencyArg(object):
4099         def __init__(self, arg=None, root_config=None):
4100                 self.arg = arg
4101                 self.root_config = root_config
4102
4103         def __str__(self):
4104                 return str(self.arg)
4105
4106 class AtomArg(DependencyArg):
4107         def __init__(self, atom=None, **kwargs):
4108                 DependencyArg.__init__(self, **kwargs)
4109                 self.atom = atom
4110                 if not isinstance(self.atom, portage.dep.Atom):
4111                         self.atom = portage.dep.Atom(self.atom)
4112                 self.set = (self.atom, )
4113
4114 class PackageArg(DependencyArg):
4115         def __init__(self, package=None, **kwargs):
4116                 DependencyArg.__init__(self, **kwargs)
4117                 self.package = package
4118                 self.atom = portage.dep.Atom("=" + package.cpv)
4119                 self.set = (self.atom, )
4120
4121 class SetArg(DependencyArg):
4122         def __init__(self, set=None, **kwargs):
4123                 DependencyArg.__init__(self, **kwargs)
4124                 self.set = set
4125                 self.name = self.arg[len(SETPREFIX):]
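                     # e.g. an argument like "@world" yields name == "world"
                     # (assuming the usual "@" SETPREFIX).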
4126
4127 class Dependency(SlotObject):
4128         __slots__ = ("atom", "blocker", "depth",
4129                 "parent", "onlydeps", "priority", "root")
4130         def __init__(self, **kwargs):
4131                 SlotObject.__init__(self, **kwargs)
4132                 if self.priority is None:
4133                         self.priority = DepPriority()
4134                 if self.depth is None:
4135                         self.depth = 0
4136
4137 class BlockerCache(portage.cache.mappings.MutableMapping):
4138         """This caches blockers of installed packages so that dep_check does not
4139         have to be done for every single installed package on every invocation of
4140         emerge.  The cache is invalidated whenever it is detected that something
4141         has changed that might alter the results of dep_check() calls:
4142                 1) the set of installed packages (including COUNTER) has changed
4143                 2) the old-style virtuals have changed
4144         """
4145
4146         # Number of uncached packages to trigger cache update, since
4147         # it's wasteful to update it for every vdb change.
4148         _cache_threshold = 5
4149
4150         class BlockerData(object):
4151
4152                 __slots__ = ("__weakref__", "atoms", "counter")
4153
4154                 def __init__(self, counter, atoms):
4155                         self.counter = counter
4156                         self.atoms = atoms
4157
4158         def __init__(self, myroot, vardb):
4159                 self._vardb = vardb
4160                 self._virtuals = vardb.settings.getvirtuals()
4161                 self._cache_filename = os.path.join(myroot,
4162                         portage.CACHE_PATH.lstrip(os.path.sep), "vdb_blockers.pickle")
4163                 self._cache_version = "1"
4164                 self._cache_data = None
4165                 self._modified = set()
4166                 self._load()
4167
4168         def _load(self):
4169                 try:
4170                         f = open(self._cache_filename, mode='rb')
4171                         mypickle = pickle.Unpickler(f)
4172                         try:
4173                                 mypickle.find_global = None
4174                         except AttributeError:
4175                                 # TODO: If py3k, override Unpickler.find_class().
4176                                 pass
4177                         self._cache_data = mypickle.load()
4178                         f.close()
4179                         del f
4180                 except (IOError, OSError, EOFError, ValueError, pickle.UnpicklingError), e:
4181                         if isinstance(e, pickle.UnpicklingError):
4182                                 writemsg("!!! Error loading '%s': %s\n" % \
4183                                         (self._cache_filename, str(e)), noiselevel=-1)
4184                         del e
4185
4186                 cache_valid = self._cache_data and \
4187                         isinstance(self._cache_data, dict) and \
4188                         self._cache_data.get("version") == self._cache_version and \
4189                         isinstance(self._cache_data.get("blockers"), dict)
4190                 if cache_valid:
4191                         # Validate all the atoms and counters so that
4192                         # corruption is detected as soon as possible.
4193                         invalid_items = set()
4194                         for k, v in self._cache_data["blockers"].iteritems():
4195                                 if not isinstance(k, basestring):
4196                                         invalid_items.add(k)
4197                                         continue
4198                                 try:
4199                                         if portage.catpkgsplit(k) is None:
4200                                                 invalid_items.add(k)
4201                                                 continue
4202                                 except portage.exception.InvalidData:
4203                                         invalid_items.add(k)
4204                                         continue
4205                                 if not isinstance(v, tuple) or \
4206                                         len(v) != 2:
4207                                         invalid_items.add(k)
4208                                         continue
4209                                 counter, atoms = v
4210                                 if not isinstance(counter, (int, long)):
4211                                         invalid_items.add(k)
4212                                         continue
4213                                 if not isinstance(atoms, (list, tuple)):
4214                                         invalid_items.add(k)
4215                                         continue
4216                                 invalid_atom = False
4217                                 for atom in atoms:
4218                                         if not isinstance(atom, basestring):
4219                                                 invalid_atom = True
4220                                                 break
4221                                         if atom[:1] != "!" or \
4222                                                 not portage.isvalidatom(
4223                                                 atom, allow_blockers=True):
4224                                                 invalid_atom = True
4225                                                 break
4226                                 if invalid_atom:
4227                                         invalid_items.add(k)
4228                                         continue
4229
4230                         for k in invalid_items:
4231                                 del self._cache_data["blockers"][k]
4232                         if not self._cache_data["blockers"]:
4233                                 cache_valid = False
4234
4235                 if not cache_valid:
4236                         self._cache_data = {"version":self._cache_version}
4237                         self._cache_data["blockers"] = {}
4238                         self._cache_data["virtuals"] = self._virtuals
4239                 self._modified.clear()
4240
4241         def flush(self):
4242                 """If the current user has permission and the internal blocker cache
4243                 has been updated, save it to disk and mark it unmodified.  This is called
4244                 by emerge after it has processed blockers for all installed packages.
4245                 Currently, the cache is only written if the user has superuser
4246                 privileges (since that's required to obtain a lock), but all users
4247                 have read access and benefit from faster blocker lookups (as long as
4248                 the entire cache is still valid).  The cache is stored as a pickled
4249                 dict object with the following format:
4250
4251                 {
4252                         version : "1",
4253                         "blockers" : {cpv1:(counter,(atom1, atom2...)), cpv2...},
4254                         "virtuals" : vardb.settings.getvirtuals()
4255                 }
4256                 """
4257                 if len(self._modified) >= self._cache_threshold and \
4258                         secpass >= 2:
4259                         try:
4260                                 f = portage.util.atomic_ofstream(self._cache_filename, mode='wb')
4261                                 pickle.dump(self._cache_data, f, protocol=2)
4262                                 f.close()
4263                                 portage.util.apply_secpass_permissions(
4264                                         self._cache_filename, gid=portage.portage_gid, mode=0644)
4265                         except (IOError, OSError), e:
4266                                 pass
4267                         self._modified.clear()
4268
4269         def __setitem__(self, cpv, blocker_data):
4270                 """
4271                 Update the cache and mark it as modified for a future call to
4272                 self.flush().
4273
4274                 @param cpv: Package for which to cache blockers.
4275                 @type cpv: String
4276                 @param blocker_data: An object with counter and atoms attributes.
4277                 @type blocker_data: BlockerData
4278                 """
4279                 self._cache_data["blockers"][cpv] = \
4280                         (blocker_data.counter, tuple(str(x) for x in blocker_data.atoms))
4281                 self._modified.add(cpv)
4282
4283         def __iter__(self):
4284                 if self._cache_data is None:
4285                         # triggered by python-trace
4286                         return iter([])
4287                 return iter(self._cache_data["blockers"])
4288
4289         def __delitem__(self, cpv):
4290                 del self._cache_data["blockers"][cpv]
4291
4292         def __getitem__(self, cpv):
4293                 """
4294                 @rtype: BlockerData
4295                 @returns: An object with counter and atoms attributes.
4296                 """
4297                 return self.BlockerData(*self._cache_data["blockers"][cpv])
4298
4299 class BlockerDB(object):
4300
4301         def __init__(self, root_config):
4302                 self._root_config = root_config
4303                 self._vartree = root_config.trees["vartree"]
4304                 self._portdb = root_config.trees["porttree"].dbapi
4305
4306                 self._dep_check_trees = None
4307                 self._fake_vartree = None
4308
4309         def _get_fake_vartree(self, acquire_lock=0):
4310                 fake_vartree = self._fake_vartree
4311                 if fake_vartree is None:
4312                         fake_vartree = FakeVartree(self._root_config,
4313                                 acquire_lock=acquire_lock)
4314                         self._fake_vartree = fake_vartree
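                             # dep_check() is pointed at the FakeVartree for both
                             # the "porttree" and "vartree" entries so that the
                             # dependency evaluation in findInstalledBlockers() is
                             # done against installed packages rather than the
                             # live portage tree.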
4315                         self._dep_check_trees = { self._vartree.root : {
4316                                 "porttree"    :  fake_vartree,
4317                                 "vartree"     :  fake_vartree,
4318                         }}
4319                 else:
4320                         fake_vartree.sync(acquire_lock=acquire_lock)
4321                 return fake_vartree
4322
4323         def findInstalledBlockers(self, new_pkg, acquire_lock=0):
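                     # Returns the set of installed packages involved in a blocker
                     # relationship with new_pkg, in either direction: installed
                     # packages whose blocker atoms match new_pkg, and installed
                     # packages matched by new_pkg's own blocker atoms.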
4324                 blocker_cache = BlockerCache(self._vartree.root, self._vartree.dbapi)
4325                 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
4326                 settings = self._vartree.settings
4327                 stale_cache = set(blocker_cache)
4328                 fake_vartree = self._get_fake_vartree(acquire_lock=acquire_lock)
4329                 dep_check_trees = self._dep_check_trees
4330                 vardb = fake_vartree.dbapi
4331                 installed_pkgs = list(vardb)
4332
4333                 for inst_pkg in installed_pkgs:
4334                         stale_cache.discard(inst_pkg.cpv)
4335                         cached_blockers = blocker_cache.get(inst_pkg.cpv)
4336                         if cached_blockers is not None and \
4337                                 cached_blockers.counter != long(inst_pkg.metadata["COUNTER"]):
4338                                 cached_blockers = None
4339                         if cached_blockers is not None:
4340                                 blocker_atoms = cached_blockers.atoms
4341                         else:
4342                                 # Use aux_get() to trigger FakeVartree global
4343                                 # updates on *DEPEND when appropriate.
4344                                 depstr = " ".join(vardb.aux_get(inst_pkg.cpv, dep_keys))
4345                                 try:
4346                                         portage.dep._dep_check_strict = False
4347                                         success, atoms = portage.dep_check(depstr,
4348                                                 vardb, settings, myuse=inst_pkg.use.enabled,
4349                                                 trees=dep_check_trees, myroot=inst_pkg.root)
4350                                 finally:
4351                                         portage.dep._dep_check_strict = True
4352                                 if not success:
4353                                         pkg_location = os.path.join(inst_pkg.root,
4354                                                 portage.VDB_PATH, inst_pkg.category, inst_pkg.pf)
4355                                         portage.writemsg("!!! %s/*DEPEND: %s\n" % \
4356                                                 (pkg_location, atoms), noiselevel=-1)
4357                                         continue
4358
4359                                 blocker_atoms = [atom for atom in atoms \
4360                                         if atom.startswith("!")]
4361                                 blocker_atoms.sort()
4362                                 counter = long(inst_pkg.metadata["COUNTER"])
4363                                 blocker_cache[inst_pkg.cpv] = \
4364                                         blocker_cache.BlockerData(counter, blocker_atoms)
4365                 for cpv in stale_cache:
4366                         del blocker_cache[cpv]
4367                 blocker_cache.flush()
4368
4369                 blocker_parents = digraph()
4370                 blocker_atoms = []
4371                 for pkg in installed_pkgs:
4372                         for blocker_atom in blocker_cache[pkg.cpv].atoms:
4373                                 blocker_atom = blocker_atom.lstrip("!")
4374                                 blocker_atoms.append(blocker_atom)
4375                                 blocker_parents.add(blocker_atom, pkg)
4376
4377                 blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
4378                 blocking_pkgs = set()
4379                 for atom in blocker_atoms.iterAtomsForPackage(new_pkg):
4380                         blocking_pkgs.update(blocker_parents.parent_nodes(atom))
4381
4382                 # Check for blockers in the other direction.
4383                 depstr = " ".join(new_pkg.metadata[k] for k in dep_keys)
4384                 try:
4385                         portage.dep._dep_check_strict = False
4386                         success, atoms = portage.dep_check(depstr,
4387                                 vardb, settings, myuse=new_pkg.use.enabled,
4388                                 trees=dep_check_trees, myroot=new_pkg.root)
4389                 finally:
4390                         portage.dep._dep_check_strict = True
4391                 if not success:
4392                         # We should never get this far with invalid deps.
4393                         show_invalid_depstring_notice(new_pkg, depstr, atoms)
4394                         assert False
4395
4396                 blocker_atoms = [atom.lstrip("!") for atom in atoms \
4397                         if atom[:1] == "!"]
4398                 if blocker_atoms:
4399                         blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
4400                         for inst_pkg in installed_pkgs:
4401                                 try:
4402                                         blocker_atoms.iterAtomsForPackage(inst_pkg).next()
4403                                 except (portage.exception.InvalidDependString, StopIteration):
4404                                         continue
4405                                 blocking_pkgs.add(inst_pkg)
4406
4407                 return blocking_pkgs
4408
4409 def show_invalid_depstring_notice(parent_node, depstring, error_msg):
4410
4411         msg1 = "\n\n!!! Invalid or corrupt dependency specification: " + \
4412                 "\n\n%s\n\n%s\n\n%s\n\n" % (error_msg, parent_node, depstring)
4413         p_type, p_root, p_key, p_status = parent_node
4414         msg = []
4415         if p_status == "nomerge":
4416                 category, pf = portage.catsplit(p_key)
4417                 pkg_location = os.path.join(p_root, portage.VDB_PATH, category, pf)
4418                 msg.append("Portage is unable to process the dependencies of the ")
4419                 msg.append("'%s' package. " % p_key)
4420                 msg.append("In order to correct this problem, the package ")
4421                 msg.append("should be uninstalled, reinstalled, or upgraded. ")
4422                 msg.append("As a temporary workaround, the --nodeps option can ")
4423                 msg.append("be used to ignore all dependencies.  For reference, ")
4424                 msg.append("the problematic dependencies can be found in the ")
4425                 msg.append("*DEPEND files located in '%s/'." % pkg_location)
4426         else:
4427                 msg.append("This package can not be installed. ")
4428                 msg.append("Please notify the '%s' package maintainer " % p_key)
4429                 msg.append("about this problem.")
4430
4431         msg2 = "".join("%s\n" % line for line in textwrap.wrap("".join(msg), 72))
4432         writemsg_level(msg1 + msg2, level=logging.ERROR, noiselevel=-1)
4433
4434 class PackageVirtualDbapi(portage.dbapi):
4435         """
4436         A dbapi-like interface class that represents the state of the installed
4437         package database as new packages are installed, replacing any packages
4438         that previously existed in the same slot. The main difference between
4439         this class and fakedbapi is that this one uses Package instances
4440         internally (passed in via cpv_inject() and cpv_remove() calls).
4441         """
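             # Typical use (illustrative sketch): populate the instance with
             # Package objects and query it through the usual dbapi methods, e.g.
             #     fakedb = PackageVirtualDbapi(settings)
             #     fakedb.cpv_inject(pkg)
             #     matches = fakedb.match("dev-lang/python")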
4442         def __init__(self, settings):
4443                 portage.dbapi.__init__(self)
4444                 self.settings = settings
4445                 self._match_cache = {}
4446                 self._cp_map = {}
4447                 self._cpv_map = {}
4448
4449         def clear(self):
4450                 """
4451                 Remove all packages.
4452                 """
4453                 if self._cpv_map:
4454                         self._clear_cache()
4455                         self._cp_map.clear()
4456                         self._cpv_map.clear()
4457
4458         def copy(self):
4459                 obj = PackageVirtualDbapi(self.settings)
4460                 obj._match_cache = self._match_cache.copy()
4461                 obj._cp_map = self._cp_map.copy()
4462                 for k, v in obj._cp_map.iteritems():
4463                         obj._cp_map[k] = v[:]
4464                 obj._cpv_map = self._cpv_map.copy()
4465                 return obj
4466
4467         def __iter__(self):
4468                 return self._cpv_map.itervalues()
4469
4470         def __contains__(self, item):
4471                 existing = self._cpv_map.get(item.cpv)
4472                 if existing is not None and \
4473                         existing == item:
4474                         return True
4475                 return False
4476
4477         def get(self, item, default=None):
4478                 cpv = getattr(item, "cpv", None)
4479                 if cpv is None:
4480                         if len(item) != 4:
4481                                 return default
4482                         type_name, root, cpv, operation = item
4483
4484                 existing = self._cpv_map.get(cpv)
4485                 if existing is not None and \
4486                         existing == item:
4487                         return existing
4488                 return default
4489
4490         def match_pkgs(self, atom):
4491                 return [self._cpv_map[cpv] for cpv in self.match(atom)]
4492
4493         def _clear_cache(self):
4494                 if self._categories is not None:
4495                         self._categories = None
4496                 if self._match_cache:
4497                         self._match_cache = {}
4498
4499         def match(self, origdep, use_cache=1):
4500                 result = self._match_cache.get(origdep)
4501                 if result is not None:
4502                         return result[:]
4503                 result = portage.dbapi.match(self, origdep, use_cache=use_cache)
4504                 self._match_cache[origdep] = result
4505                 return result[:]
4506
4507         def cpv_exists(self, cpv):
4508                 return cpv in self._cpv_map
4509
4510         def cp_list(self, mycp, use_cache=1):
4511                 cachelist = self._match_cache.get(mycp)
4512                 # cp_list() doesn't expand old-style virtuals
4513                 if cachelist and cachelist[0].startswith(mycp):
4514                         return cachelist[:]
4515                 cpv_list = self._cp_map.get(mycp)
4516                 if cpv_list is None:
4517                         cpv_list = []
4518                 else:
4519                         cpv_list = [pkg.cpv for pkg in cpv_list]
4520                 self._cpv_sort_ascending(cpv_list)
4521                 if not (not cpv_list and mycp.startswith("virtual/")):
4522                         self._match_cache[mycp] = cpv_list
4523                 return cpv_list[:]
4524
4525         def cp_all(self):
4526                 return list(self._cp_map)
4527
4528         def cpv_all(self):
4529                 return list(self._cpv_map)
4530
4531         def cpv_inject(self, pkg):
4532                 cp_list = self._cp_map.get(pkg.cp)
4533                 if cp_list is None:
4534                         cp_list = []
4535                         self._cp_map[pkg.cp] = cp_list
4536                 e_pkg = self._cpv_map.get(pkg.cpv)
4537                 if e_pkg is not None:
4538                         if e_pkg == pkg:
4539                                 return
4540                         self.cpv_remove(e_pkg)
4541                 for e_pkg in cp_list:
4542                         if e_pkg.slot_atom == pkg.slot_atom:
4543                                 if e_pkg == pkg:
4544                                         return
4545                                 self.cpv_remove(e_pkg)
4546                                 break
4547                 cp_list.append(pkg)
4548                 self._cpv_map[pkg.cpv] = pkg
4549                 self._clear_cache()
4550
4551         def cpv_remove(self, pkg):
4552                 old_pkg = self._cpv_map.get(pkg.cpv)
4553                 if old_pkg != pkg:
4554                         raise KeyError(pkg)
4555                 self._cp_map[pkg.cp].remove(pkg)
4556                 del self._cpv_map[pkg.cpv]
4557                 self._clear_cache()
4558
4559         def aux_get(self, cpv, wants):
4560                 metadata = self._cpv_map[cpv].metadata
4561                 return [metadata.get(x, "") for x in wants]
4562
4563         def aux_update(self, cpv, values):
4564                 self._cpv_map[cpv].metadata.update(values)
4565                 self._clear_cache()
4566
4567 class depgraph(object):
4568
4569         pkg_tree_map = RootConfig.pkg_tree_map
4570
4571         _dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
4572
4573         def __init__(self, settings, trees, myopts, myparams, spinner):
4574                 self.settings = settings
4575                 self.target_root = settings["ROOT"]
4576                 self.myopts = myopts
4577                 self.myparams = myparams
4578                 self.edebug = 0
4579                 if settings.get("PORTAGE_DEBUG", "") == "1":
4580                         self.edebug = 1
4581                 self.spinner = spinner
4582                 self._running_root = trees["/"]["root_config"]
4583                 self._opts_no_restart = Scheduler._opts_no_restart
4584                 self.pkgsettings = {}
4585                 # Maps slot atom to package for each Package added to the graph.
4586                 self._slot_pkg_map = {}
4587                 # Maps nodes to the reasons they were selected for reinstallation.
4588                 self._reinstall_nodes = {}
4589                 self.mydbapi = {}
4590                 self.trees = {}
4591                 self._trees_orig = trees
4592                 self.roots = {}
4593                 # Contains a filtered view of preferred packages that are selected
4594                 # from available repositories.
4595                 self._filtered_trees = {}
4596                 # Contains installed packages and new packages that have been added
4597                 # to the graph.
4598                 self._graph_trees = {}
4599                 # All Package instances
4600                 self._pkg_cache = {}
4601                 for myroot in trees:
4602                         self.trees[myroot] = {}
4603                         # Create a RootConfig instance that references
4604                         # the FakeVartree instead of the real one.
4605                         self.roots[myroot] = RootConfig(
4606                                 trees[myroot]["vartree"].settings,
4607                                 self.trees[myroot],
4608                                 trees[myroot]["root_config"].setconfig)
4609                         for tree in ("porttree", "bintree"):
4610                                 self.trees[myroot][tree] = trees[myroot][tree]
4611                         self.trees[myroot]["vartree"] = \
4612                                 FakeVartree(trees[myroot]["root_config"],
4613                                         pkg_cache=self._pkg_cache)
4614                         self.pkgsettings[myroot] = portage.config(
4615                                 clone=self.trees[myroot]["vartree"].settings)
4616                         self._slot_pkg_map[myroot] = {}
4617                         vardb = self.trees[myroot]["vartree"].dbapi
4618                         preload_installed_pkgs = "--nodeps" not in self.myopts and \
4619                                 "--buildpkgonly" not in self.myopts
4620                         # This fakedbapi instance will model the state that the vdb will
4621                         # have after new packages have been installed.
4622                         fakedb = PackageVirtualDbapi(vardb.settings)
4623                         if preload_installed_pkgs:
4624                                 for pkg in vardb:
4625                                         self.spinner.update()
4626                                         # This triggers metadata updates via FakeVartree.
4627                                         vardb.aux_get(pkg.cpv, [])
4628                                         fakedb.cpv_inject(pkg)
4629
4630                         # Now that the vardb state is cached in our FakeVartree,
4631                         # we won't be needing the real vartree cache for a while.
4632                         # To make some room on the heap, clear the vardbapi
4633                         # caches.
4634                         trees[myroot]["vartree"].dbapi._clear_cache()
4635                         gc.collect()
4636
4637                         self.mydbapi[myroot] = fakedb
4638                         def graph_tree():
4639                                 pass
4640                         graph_tree.dbapi = fakedb
4641                         self._graph_trees[myroot] = {}
4642                         self._filtered_trees[myroot] = {}
4643                         # Substitute the graph tree for the vartree in dep_check() since we
4644                         # want atom selections to be consistent with package selections that
4645                         # have already been made.
4646                         self._graph_trees[myroot]["porttree"]   = graph_tree
4647                         self._graph_trees[myroot]["vartree"]    = graph_tree
4648                         def filtered_tree():
4649                                 pass
4650                         filtered_tree.dbapi = self._dep_check_composite_db(self, myroot)
4651                         self._filtered_trees[myroot]["porttree"] = filtered_tree
4652
4653                         # Passing in graph_tree as the vartree here could lead to better
4654                         # atom selections in some cases by causing atoms for packages that
4655                         # have been added to the graph to be preferred over other choices.
4656                         # However, it can trigger atom selections that result in
4657                         # unresolvable direct circular dependencies. For example, this
4658                         # happens with gwydion-dylan which depends on either itself or
4659                         # gwydion-dylan-bin. In case gwydion-dylan is not yet installed,
4660                         # gwydion-dylan-bin needs to be selected in order to avoid an
4661                         # unresolvable direct circular dependency.
4662                         #
4663                         # To solve the problem described above, pass in "graph_db" so that
4664                         # packages that have been added to the graph are distinguishable
4665                         # from other available packages and installed packages. Also, pass
4666                         # the parent package into self._select_atoms() calls so that
4667                         # unresolvable direct circular dependencies can be detected and
4668                         # avoided when possible.
4669                         self._filtered_trees[myroot]["graph_db"] = graph_tree.dbapi
4670                         self._filtered_trees[myroot]["vartree"] = self.trees[myroot]["vartree"]
4671
4672                         dbs = []
4673                         portdb = self.trees[myroot]["porttree"].dbapi
4674                         bindb  = self.trees[myroot]["bintree"].dbapi
4675                         vardb  = self.trees[myroot]["vartree"].dbapi
4676                         #               (db, pkg_type, built, installed, db_keys)
4677                         if "--usepkgonly" not in self.myopts:
4678                                 db_keys = list(portdb._aux_cache_keys)
4679                                 dbs.append((portdb, "ebuild", False, False, db_keys))
4680                         if "--usepkg" in self.myopts:
4681                                 db_keys = list(bindb._aux_cache_keys)
4682                                 dbs.append((bindb,  "binary", True, False, db_keys))
4683                         db_keys = list(trees[myroot]["vartree"].dbapi._aux_cache_keys)
4684                         dbs.append((vardb, "installed", True, True, db_keys))
4685                         self._filtered_trees[myroot]["dbs"] = dbs
4686                         if "--usepkg" in self.myopts:
4687                                 self.trees[myroot]["bintree"].populate(
4688                                         "--getbinpkg" in self.myopts,
4689                                         "--getbinpkgonly" in self.myopts)
4690                 del trees
4691
4692                 self.digraph=portage.digraph()
4693                 # contains all sets added to the graph
4694                 self._sets = {}
4695                 # contains atoms given as arguments
4696                 self._sets["args"] = InternalPackageSet()
4697                 # contains all atoms from all sets added to the graph, including
4698                 # atoms given as arguments
4699                 self._set_atoms = InternalPackageSet()
4700                 self._atom_arg_map = {}
4701                 # contains all nodes pulled in by self._set_atoms
4702                 self._set_nodes = set()
4703                 # Contains only Blocker -> Uninstall edges
4704                 self._blocker_uninstalls = digraph()
4705                 # Contains only Package -> Blocker edges
4706                 self._blocker_parents = digraph()
4707                 # Contains only irrelevant Package -> Blocker edges
4708                 self._irrelevant_blockers = digraph()
4709                 # Contains only unsolvable Package -> Blocker edges
4710                 self._unsolvable_blockers = digraph()
4711                 # Contains all Blocker -> Blocked Package edges
4712                 self._blocked_pkgs = digraph()
4713                 # Contains world packages that have been protected from
4714                 # uninstallation but may not have been added to the graph
4715                 # if the graph is not complete yet.
4716                 self._blocked_world_pkgs = {}
4717                 self._slot_collision_info = {}
4718                 # Slot collision nodes are not allowed to block other packages since
4719                 # blocker validation is only able to account for one package per slot.
4720                 self._slot_collision_nodes = set()
4721                 self._parent_atoms = {}
4722                 self._slot_conflict_parent_atoms = set()
4723                 self._serialized_tasks_cache = None
4724                 self._scheduler_graph = None
4725                 self._displayed_list = None
4726                 self._pprovided_args = []
4727                 self._missing_args = []
4728                 self._masked_installed = set()
4729                 self._unsatisfied_deps_for_display = []
4730                 self._unsatisfied_blockers_for_display = None
4731                 self._circular_deps_for_display = None
4732                 self._dep_stack = []
4733                 self._unsatisfied_deps = []
4734                 self._initially_unsatisfied_deps = []
4735                 self._ignored_deps = []
4736                 self._required_set_names = set(["system", "world"])
4737                 self._select_atoms = self._select_atoms_highest_available
4738                 self._select_package = self._select_pkg_highest_available
4739                 self._highest_pkg_cache = {}
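                # Editor's note (illustrative summary, not original code): the shapes
                # of the conflict-tracking structures initialized above are roughly:
                #
                #   self._slot_collision_info[(slot_atom, root)] -> set of Package instances
                #   self._parent_atoms[pkg]                      -> set of (parent, atom) tuples
                #   self._slot_conflict_parent_atoms             -> set of (parent, atom) tuples
                #                                                   that only match a subset of
                #                                                   the packages in a slot
                #
                # These are filled in by _add_slot_conflict(), _add_parent_atom() and
                # _process_slot_conflicts() below.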
4740
4741         def _show_slot_collision_notice(self):
4742                 """Show an informational message advising the user to mask one of
4743                 the packages. In some cases it may be possible to resolve this
4744                 automatically, but support for backtracking (removal of nodes that have
4745                 already been selected) will be required in order to handle all possible
4746                 cases.
4747                 """
4748
4749                 if not self._slot_collision_info:
4750                         return
4751
4752                 self._show_merge_list()
4753
4754                 msg = []
4755                 msg.append("\n!!! Multiple package instances within a single " + \
4756                         "package slot have been pulled\n")
4757                 msg.append("!!! into the dependency graph, resulting" + \
4758                         " in a slot conflict:\n\n")
4759                 indent = "  "
4760                 # Max number of parents shown, to avoid flooding the display.
4761                 max_parents = 3
4762                 explanation_columns = 70
4763                 explanations = 0
4764                 for (slot_atom, root), slot_nodes \
4765                         in self._slot_collision_info.iteritems():
4766                         msg.append(str(slot_atom))
4767                         msg.append("\n\n")
4768
4769                         for node in slot_nodes:
4770                                 msg.append(indent)
4771                                 msg.append(str(node))
4772                                 parent_atoms = self._parent_atoms.get(node)
4773                                 if parent_atoms:
4774                                         pruned_list = set()
4775                                         # Prefer conflict atoms over others.
4776                                         for parent_atom in parent_atoms:
4777                                                 if len(pruned_list) >= max_parents:
4778                                                         break
4779                                                 if parent_atom in self._slot_conflict_parent_atoms:
4780                                                         pruned_list.add(parent_atom)
4781
4782                                         # If this package was pulled in by conflict atoms then
4783                                         # show those alone since those are the most interesting.
4784                                         if not pruned_list:
4785                                                 # When generating the pruned list, prefer instances
4786                                                 # of DependencyArg over instances of Package.
4787                                                 for parent_atom in parent_atoms:
4788                                                         if len(pruned_list) >= max_parents:
4789                                                                 break
4790                                                         parent, atom = parent_atom
4791                                                         if isinstance(parent, DependencyArg):
4792                                                                 pruned_list.add(parent_atom)
4793                                                 # Prefer Packages instances that themselves have been
4794                                                 # pulled into collision slots.
4795                                                 for parent_atom in parent_atoms:
4796                                                         if len(pruned_list) >= max_parents:
4797                                                                 break
4798                                                         parent, atom = parent_atom
4799                                                         if isinstance(parent, Package) and \
4800                                                                 (parent.slot_atom, parent.root) \
4801                                                                 in self._slot_collision_info:
4802                                                                 pruned_list.add(parent_atom)
4803                                                 for parent_atom in parent_atoms:
4804                                                         if len(pruned_list) >= max_parents:
4805                                                                 break
4806                                                         pruned_list.add(parent_atom)
4807                                         omitted_parents = len(parent_atoms) - len(pruned_list)
4808                                         parent_atoms = pruned_list
4809                                         msg.append(" pulled in by\n")
4810                                         for parent_atom in parent_atoms:
4811                                                 parent, atom = parent_atom
4812                                                 msg.append(2*indent)
4813                                                 if isinstance(parent,
4814                                                         (PackageArg, AtomArg)):
4815                                                         # For PackageArg and AtomArg types, it's
4816                                                         # redundant to display the atom attribute.
4817                                                         msg.append(str(parent))
4818                                                 else:
4819                                                         # Display the specific atom from SetArg or
4820                                                         # Package types.
4821                                                         msg.append("%s required by %s" % (atom, parent))
4822                                                 msg.append("\n")
4823                                         if omitted_parents:
4824                                                 msg.append(2*indent)
4825                                                 msg.append("(and %d more)\n" % omitted_parents)
4826                                 else:
4827                                         msg.append(" (no parents)\n")
4828                                 msg.append("\n")
4829                         explanation = self._slot_conflict_explanation(slot_nodes)
4830                         if explanation:
4831                                 explanations += 1
4832                                 msg.append(indent + "Explanation:\n\n")
4833                                 for line in textwrap.wrap(explanation, explanation_columns):
4834                                         msg.append(2*indent + line + "\n")
4835                                 msg.append("\n")
4836                 msg.append("\n")
4837                 sys.stderr.write("".join(msg))
4838                 sys.stderr.flush()
4839
4840                 explanations_for_all = explanations == len(self._slot_collision_info)
4841
4842                 if explanations_for_all or "--quiet" in self.myopts:
4843                         return
4844
4845                 msg = []
4846                 msg.append("It may be possible to solve this problem ")
4847                 msg.append("by using package.mask to prevent one of ")
4848                 msg.append("those packages from being selected. ")
4849                 msg.append("However, it is also possible that conflicting ")
4850                 msg.append("dependencies exist such that they are impossible to ")
4851                 msg.append("satisfy simultaneously.  If such a conflict exists in ")
4852                 msg.append("the dependencies of two different packages, then those ")
4853                 msg.append("packages can not be installed simultaneously.")
4854
4855                 from formatter import AbstractFormatter, DumbWriter
4856                 f = AbstractFormatter(DumbWriter(sys.stderr, maxcol=72))
4857                 for x in msg:
4858                         f.add_flowing_data(x)
4859                 f.end_paragraph(1)
4860
4861                 msg = []
4862                 msg.append("For more information, see MASKED PACKAGES ")
4863                 msg.append("section in the emerge man page or refer ")
4864                 msg.append("to the Gentoo Handbook.")
4865                 for x in msg:
4866                         f.add_flowing_data(x)
4867                 f.end_paragraph(1)
4868                 f.writer.flush()
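                # Editor's note (rough illustration with hypothetical names, not the
                # exact original output): for each conflict, the notice written to
                # stderr above is laid out approximately like
                #
                #   dev-libs/foo:0
                #
                #     <first package instance> pulled in by
                #       dev-libs/foo[ssl] required by <parent>
                #       (and 2 more)
                #
                #     <second package instance> (no parents)
                #
                # followed, when one can be generated, by an indented and wrapped
                # "Explanation:" paragraph.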
4869
4870         def _slot_conflict_explanation(self, slot_nodes):
4871                 """
4872                 When a slot conflict occurs due to USE deps, there are a few
4873                 different cases to consider:
4874
4875                 1) New USE are correctly set but --newuse wasn't requested so an
4876                    installed package with incorrect USE happened to get pulled
4877                    into the graph before the new one.
4878
4879                 2) New USE are incorrectly set but an installed package has correct
4880                    USE so it got pulled into the graph, and a new instance also got
4881                    pulled in due to --newuse or an upgrade.
4882
4883                 3) Multiple USE deps exist that can't be satisfied simultaneously,
4884                    and multiple package instances got pulled into the same slot to
4885                    satisfy the conflicting deps.
4886
4887                 Currently, explanations and suggested courses of action are generated
4888                 for cases 1 and 2. Case 3 is too complex to give a useful suggestion.
4889                 """
4890
4891                 if len(slot_nodes) != 2:
4892                         # Suggestions are only implemented for
4893                         # conflicts between two packages.
4894                         return None
4895
4896                 all_conflict_atoms = self._slot_conflict_parent_atoms
4897                 matched_node = None
4898                 matched_atoms = None
4899                 unmatched_node = None
4900                 for node in slot_nodes:
4901                         parent_atoms = self._parent_atoms.get(node)
4902                         if not parent_atoms:
4903                                 # Normally, there are always parent atoms. If there are
4904                                 # none then something unexpected is happening and there's
4905                                 # currently no suggestion for this case.
4906                                 return None
4907                         conflict_atoms = all_conflict_atoms.intersection(parent_atoms)
4908                         for parent_atom in conflict_atoms:
4909                                 parent, atom = parent_atom
4910                                 if not atom.use:
4911                                         # Suggestions are currently only implemented for cases
4912                                         # in which all conflict atoms have USE deps.
4913                                         return None
4914                         if conflict_atoms:
4915                                 if matched_node is not None:
4916                                         # If conflict atoms match multiple nodes
4917                                         # then there's no suggestion.
4918                                         return None
4919                                 matched_node = node
4920                                 matched_atoms = conflict_atoms
4921                         else:
4922                                 if unmatched_node is not None:
4923                                         # Neither node is matched by conflict atoms, and
4924                                         # there is no suggestion for this case.
4925                                         return None
4926                                 unmatched_node = node
4927
4928                 if matched_node is None or unmatched_node is None:
4929                         # This shouldn't happen.
4930                         return None
4931
4932                 if unmatched_node.installed and not matched_node.installed and \
4933                         unmatched_node.cpv == matched_node.cpv:
4934                         # If the conflicting packages are the same version then
4935                         # --newuse should be all that's needed. If they are different
4936                         # versions then there's some other problem.
4937                         return "New USE are correctly set, but --newuse wasn't" + \
4938                                 " requested, so an installed package with incorrect USE " + \
4939                                 "happened to get pulled into the dependency graph. " + \
4940                                 "In order to solve " + \
4941                                 "this, either specify the --newuse option or explicitly " + \
4942                                 "reinstall '%s'." % matched_node.slot_atom
4943
4944                 if matched_node.installed and not unmatched_node.installed:
4945                         atoms = sorted(set(atom for parent, atom in matched_atoms))
4946                         explanation = ("New USE for '%s' are incorrectly set. " + \
4947                                 "In order to solve this, adjust USE to satisfy '%s'") % \
4948                                 (matched_node.slot_atom, atoms[0])
4949                         if len(atoms) > 1:
4950                                 for atom in atoms[1:-1]:
4951                                         explanation += ", '%s'" % (atom,)
4952                                 if len(atoms) > 2:
4953                                         explanation += ","
4954                                 explanation += " and '%s'" % (atoms[-1],)
4955                         explanation += "."
4956                         return explanation
4957
4958                 return None
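                # Editor's note (hypothetical example, not original code): given a
                # matched_node with slot_atom "dev-libs/foo:0" and three conflicting
                # USE-dep atoms, the explanation built above would read:
                #
                #   "New USE for 'dev-libs/foo:0' are incorrectly set. In order to
                #    solve this, adjust USE to satisfy 'dev-libs/foo[a]',
                #    'dev-libs/foo[b]', and 'dev-libs/foo[c]'."
                #
                # With only two atoms the comma is dropped: "... 'a' and 'b'."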
4959
4960         def _process_slot_conflicts(self):
4961                 """
4962                 Process slot conflict data to identify specific atoms which
4963                 lead to conflict. These atoms only match a subset of the
4964                 packages that have been pulled into a given slot.
4965                 """
4966                 for (slot_atom, root), slot_nodes \
4967                         in self._slot_collision_info.iteritems():
4968
4969                         all_parent_atoms = set()
4970                         for pkg in slot_nodes:
4971                                 parent_atoms = self._parent_atoms.get(pkg)
4972                                 if not parent_atoms:
4973                                         continue
4974                                 all_parent_atoms.update(parent_atoms)
4975
4976                         for pkg in slot_nodes:
4977                                 parent_atoms = self._parent_atoms.get(pkg)
4978                                 if parent_atoms is None:
4979                                         parent_atoms = set()
4980                                         self._parent_atoms[pkg] = parent_atoms
4981                                 for parent_atom in all_parent_atoms:
4982                                         if parent_atom in parent_atoms:
4983                                                 continue
4984                                         # Use package set for matching since it will match via
4985                                         # PROVIDE when necessary, while match_from_list does not.
4986                                         parent, atom = parent_atom
4987                                         atom_set = InternalPackageSet(
4988                                                 initial_atoms=(atom,))
4989                                         if atom_set.findAtomForPackage(pkg):
4990                                                 parent_atoms.add(parent_atom)
4991                                         else:
4992                                                 self._slot_conflict_parent_atoms.add(parent_atom)
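                # Editor's note (hypothetical illustration, not original code): given a
                # slot conflict between foo-1.0 and foo-2.0 where
                #
                #   parents of foo-1.0: (bar, "dev-libs/foo")           # matches both
                #   parents of foo-2.0: (baz, ">=dev-libs/foo-2.0")     # matches only foo-2.0
                #
                # the unrestricted atom is propagated into both packages'
                # self._parent_atoms entries, while (baz, ">=dev-libs/foo-2.0") is
                # added to self._slot_conflict_parent_atoms for use by
                # _show_slot_collision_notice().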
4993
4994         def _reinstall_for_flags(self, forced_flags,
4995                 orig_use, orig_iuse, cur_use, cur_iuse):
4996                 """Return a set of flags that trigger reinstallation, or None if there
4997                 are no such flags."""
4998                 if "--newuse" in self.myopts:
4999                         flags = set(orig_iuse.symmetric_difference(
5000                                 cur_iuse).difference(forced_flags))
5001                         flags.update(orig_iuse.intersection(orig_use).symmetric_difference(
5002                                 cur_iuse.intersection(cur_use)))
5003                         if flags:
5004                                 return flags
5005                 elif "changed-use" == self.myopts.get("--reinstall"):
5006                         flags = orig_iuse.intersection(orig_use).symmetric_difference(
5007                                 cur_iuse.intersection(cur_use))
5008                         if flags:
5009                                 return flags
5010                 return None
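                # Editor's note: a small worked example (hypothetical flag sets, not
                # part of the original code) of the set algebra above under --newuse:
                #
                #   orig_iuse = set(["ssl", "gtk"]);  orig_use = set(["ssl"])
                #   cur_iuse  = set(["ssl", "qt4"]);  cur_use  = set(["ssl", "qt4"])
                #   forced_flags = set()
                #
                #   IUSE changes:        {"gtk", "qt4"}  (symmetric difference of IUSE)
                #   enabled-USE changes: {"qt4"}         (symmetric difference of IUSE & USE)
                #   => returns {"gtk", "qt4"}, so the package is reinstalled.
                #
                # With --reinstall=changed-use only the enabled-USE changes ({"qt4"})
                # would trigger a reinstall.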
5011
5012         def _create_graph(self, allow_unsatisfied=False):
5013                 dep_stack = self._dep_stack
5014                 while dep_stack:
5015                         self.spinner.update()
5016                         dep = dep_stack.pop()
5017                         if isinstance(dep, Package):
5018                                 if not self._add_pkg_deps(dep,
5019                                         allow_unsatisfied=allow_unsatisfied):
5020                                         return 0
5021                                 continue
5022                         if not self._add_dep(dep, allow_unsatisfied=allow_unsatisfied):
5023                                 return 0
5024                 return 1
5025
5026         def _add_dep(self, dep, allow_unsatisfied=False):
5027                 debug = "--debug" in self.myopts
5028                 buildpkgonly = "--buildpkgonly" in self.myopts
5029                 nodeps = "--nodeps" in self.myopts
5030                 empty = "empty" in self.myparams
5031                 deep = "deep" in self.myparams
5032                 update = "--update" in self.myopts and dep.depth <= 1
5033                 if dep.blocker:
5034                         if not buildpkgonly and \
5035                                 not nodeps and \
5036                                 dep.parent not in self._slot_collision_nodes:
5037                                 if dep.parent.onlydeps:
5038                                         # It's safe to ignore blockers if the
5039                                         # parent is an --onlydeps node.
5040                                         return 1
5041                                 # The blocker applies to the root where
5042                                 # the parent is or will be installed.
5043                                 blocker = Blocker(atom=dep.atom,
5044                                         eapi=dep.parent.metadata["EAPI"],
5045                                         root=dep.parent.root)
5046                                 self._blocker_parents.add(blocker, dep.parent)
5047                         return 1
5048                 dep_pkg, existing_node = self._select_package(dep.root, dep.atom,
5049                         onlydeps=dep.onlydeps)
5050                 if not dep_pkg:
5051                         if dep.priority.optional:
5052                                 # This could be an unnecessary build-time dep
5053                                 # pulled in by --with-bdeps=y.
5054                                 return 1
5055                         if allow_unsatisfied:
5056                                 self._unsatisfied_deps.append(dep)
5057                                 return 1
5058                         self._unsatisfied_deps_for_display.append(
5059                                 ((dep.root, dep.atom), {"myparent":dep.parent}))
5060                         return 0
5061                 # In some cases, dep_check will return deps that shouldn't
5062                 # be processed any further, so they are identified and
5063                 # discarded here. Try to discard as few as possible since
5064                 # discarded dependencies reduce the amount of information
5065                 # available for optimization of merge order.
5066                 if dep.priority.satisfied and \
5067                         not dep_pkg.installed and \
5068                         not (existing_node or empty or deep or update):
5069                         myarg = None
5070                         if dep.root == self.target_root:
5071                                 try:
5072                                         myarg = self._iter_atoms_for_pkg(dep_pkg).next()
5073                                 except StopIteration:
5074                                         pass
5075                                 except portage.exception.InvalidDependString:
5076                                         if not dep_pkg.installed:
5077                                                 # This shouldn't happen since the package
5078                                                 # should have been masked.
5079                                                 raise
5080                         if not myarg:
5081                                 self._ignored_deps.append(dep)
5082                                 return 1
5083
5084                 if not self._add_pkg(dep_pkg, dep):
5085                         return 0
5086                 return 1
5087
5088         def _add_pkg(self, pkg, dep):
5089                 myparent = None
5090                 priority = None
5091                 depth = 0
5092                 if dep is None:
5093                         dep = Dependency()
5094                 else:
5095                         myparent = dep.parent
5096                         priority = dep.priority
5097                         depth = dep.depth
5098                 if priority is None:
5099                         priority = DepPriority()
5100                 """
5101                 Fills the digraph with nodes composed of packages to merge.
5102                 mybigkey is the package spec of the package to merge.
5103                 myparent is the package depending on mybigkey (or None).
5104                 addme = Should we add this package to the digraph or are we just looking at its deps?
5105                         Think --onlydeps; we need to ignore packages in that case.
5106                 #stuff to add:
5107                 #SLOT-aware emerge
5108                 #IUSE-aware emerge -> USE DEP aware depgraph
5109                 #"no downgrade" emerge
5110                 """
5111                 # Ensure that the dependencies of the same package
5112                 # are never processed more than once.
5113                 previously_added = pkg in self.digraph
5114
5115                 # select the correct /var database that we'll be checking against
5116                 vardbapi = self.trees[pkg.root]["vartree"].dbapi
5117                 pkgsettings = self.pkgsettings[pkg.root]
5118
5119                 arg_atoms = None
5120                 if True:
5121                         try:
5122                                 arg_atoms = list(self._iter_atoms_for_pkg(pkg))
5123                         except portage.exception.InvalidDependString, e:
5124                                 if not pkg.installed:
5125                                         show_invalid_depstring_notice(
5126                                                 pkg, pkg.metadata["PROVIDE"], str(e))
5127                                         return 0
5128                                 del e
5129
5130                 if not pkg.onlydeps:
5131                         if not pkg.installed and \
5132                                 "empty" not in self.myparams and \
5133                                 vardbapi.match(pkg.slot_atom):
5134                                 # Increase the priority of dependencies on packages that
5135                                 # are being rebuilt. This optimizes merge order so that
5136                                 # dependencies are rebuilt/updated as soon as possible,
5137                                 # which is needed especially when emerge is called by
5138                                 # revdep-rebuild since dependencies may be affected by ABI
5139                                 # breakage that has rendered them useless. Don't adjust
5140                                 # priority here when in "empty" mode since all packages
5141                                 # are being merged in that case.
5142                                 priority.rebuild = True
5143
5144                         existing_node = self._slot_pkg_map[pkg.root].get(pkg.slot_atom)
5145                         slot_collision = False
5146                         if existing_node:
5147                                 existing_node_matches = pkg.cpv == existing_node.cpv
5148                                 if existing_node_matches and \
5149                                         pkg != existing_node and \
5150                                         dep.atom is not None:
5151                                         # Use package set for matching since it will match via
5152                                         # PROVIDE when necessary, while match_from_list does not.
5153                                         atom_set = InternalPackageSet(initial_atoms=[dep.atom])
5154                                         if not atom_set.findAtomForPackage(existing_node):
5155                                                 existing_node_matches = False
5156                                 if existing_node_matches:
5157                                         # The existing node can be reused.
5158                                         if arg_atoms:
5159                                                 for parent_atom in arg_atoms:
5160                                                         parent, atom = parent_atom
5161                                                         self.digraph.add(existing_node, parent,
5162                                                                 priority=priority)
5163                                                         self._add_parent_atom(existing_node, parent_atom)
5164                                         # If a direct circular dependency is not an unsatisfied
5165                                         # buildtime dependency then drop it here since otherwise
5166                                         # it can skew the merge order calculation in an unwanted
5167                                         # way.
5168                                         if existing_node != myparent or \
5169                                                 (priority.buildtime and not priority.satisfied):
5170                                                 self.digraph.addnode(existing_node, myparent,
5171                                                         priority=priority)
5172                                                 if dep.atom is not None and dep.parent is not None:
5173                                                         self._add_parent_atom(existing_node,
5174                                                                 (dep.parent, dep.atom))
5175                                         return 1
5176                                 else:
5177
5178                                         # A slot collision has occurred.  Sometimes this coincides
5179                                         # with unresolvable blockers, so the slot collision will be
5180                                         # shown later if there are no unresolvable blockers.
5181                                         self._add_slot_conflict(pkg)
5182                                         slot_collision = True
5183
5184                         if slot_collision:
5185                                 # Now add this node to the graph so that self.display()
5186                                 # can show use flags and --tree output.  This node is
5187                                 # only being partially added to the graph.  It must not be
5188                                 # allowed to interfere with the other nodes that have been
5189                                 # added.  Do not overwrite data for existing nodes in
5190                                 # self.mydbapi since that data will be used for blocker
5191                                 # validation.
5192                                 # Even though the graph is now invalid, continue to process
5193                                 # dependencies so that things like --fetchonly can still
5194                                 # function despite collisions.
5195                                 pass
5196                         elif not previously_added:
5197                                 self._slot_pkg_map[pkg.root][pkg.slot_atom] = pkg
5198                                 self.mydbapi[pkg.root].cpv_inject(pkg)
5199                                 self._filtered_trees[pkg.root]["porttree"].dbapi._clear_cache()
5200
5201                         if not pkg.installed:
5202                                 # Allow this package to satisfy old-style virtuals in case it
5203                                 # doesn't already. Any pre-existing providers will be preferred
5204                                 # over this one.
5205                                 try:
5206                                         pkgsettings.setinst(pkg.cpv, pkg.metadata)
5207                                         # For consistency, also update the global virtuals.
5208                                         settings = self.roots[pkg.root].settings
5209                                         settings.unlock()
5210                                         settings.setinst(pkg.cpv, pkg.metadata)
5211                                         settings.lock()
5212                                 except portage.exception.InvalidDependString, e:
5213                                         show_invalid_depstring_notice(
5214                                                 pkg, pkg.metadata["PROVIDE"], str(e))
5215                                         del e
5216                                         return 0
5217
5218                 if arg_atoms:
5219                         self._set_nodes.add(pkg)
5220
5221                 # Do this even when addme is False (--onlydeps) so that the
5222                 # parent/child relationship is always known in case
5223                 # self._show_slot_collision_notice() needs to be called later.
5224                 self.digraph.add(pkg, myparent, priority=priority)
5225                 if dep.atom is not None and dep.parent is not None:
5226                         self._add_parent_atom(pkg, (dep.parent, dep.atom))
5227
5228                 if arg_atoms:
5229                         for parent_atom in arg_atoms:
5230                                 parent, atom = parent_atom
5231                                 self.digraph.add(pkg, parent, priority=priority)
5232                                 self._add_parent_atom(pkg, parent_atom)
5233
5234                 """ This section determines whether we go deeper into dependencies or not.
5235                     We want to go deeper on a few occasions:
5236                     When installing package A, we need to make sure package A's deps are met.
5237                     With emerge --deep <pkgspec>, we need to recursively check the dependencies of pkgspec.
5238                     If we are in --nodeps (no recursion) mode, we only check one level of dependencies.
5239                 """
5240                 dep_stack = self._dep_stack
5241                 if "recurse" not in self.myparams:
5242                         return 1
5243                 elif pkg.installed and \
5244                         "deep" not in self.myparams:
5245                         dep_stack = self._ignored_deps
5246
5247                 self.spinner.update()
5248
5249                 if arg_atoms:
5250                         depth = 0
5251                 pkg.depth = depth
5252                 if not previously_added:
5253                         dep_stack.append(pkg)
5254                 return 1
5255
5256         def _add_parent_atom(self, pkg, parent_atom):
5257                 parent_atoms = self._parent_atoms.get(pkg)
5258                 if parent_atoms is None:
5259                         parent_atoms = set()
5260                         self._parent_atoms[pkg] = parent_atoms
5261                 parent_atoms.add(parent_atom)
5262
5263         def _add_slot_conflict(self, pkg):
5264                 self._slot_collision_nodes.add(pkg)
5265                 slot_key = (pkg.slot_atom, pkg.root)
5266                 slot_nodes = self._slot_collision_info.get(slot_key)
5267                 if slot_nodes is None:
5268                         slot_nodes = set()
5269                         slot_nodes.add(self._slot_pkg_map[pkg.root][pkg.slot_atom])
5270                         self._slot_collision_info[slot_key] = slot_nodes
5271                 slot_nodes.add(pkg)
5272
5273         def _add_pkg_deps(self, pkg, allow_unsatisfied=False):
5274
5275                 mytype = pkg.type_name
5276                 myroot = pkg.root
5277                 mykey = pkg.cpv
5278                 metadata = pkg.metadata
5279                 myuse = pkg.use.enabled
5280                 jbigkey = pkg
5281                 depth = pkg.depth + 1
5282                 removal_action = "remove" in self.myparams
5283
5284                 edepend={}
5285                 depkeys = ["DEPEND","RDEPEND","PDEPEND"]
5286                 for k in depkeys:
5287                         edepend[k] = metadata[k]
5288
5289                 if not pkg.built and \
5290                         "--buildpkgonly" in self.myopts and \
5291                         "deep" not in self.myparams and \
5292                         "empty" not in self.myparams:
5293                         edepend["RDEPEND"] = ""
5294                         edepend["PDEPEND"] = ""
5295                 bdeps_optional = False
5296
5297                 if pkg.built and not removal_action:
5298                         if self.myopts.get("--with-bdeps", "n") == "y":
5299                                 # Pull in build time deps as requested, but mark them as
5300                                 # "optional" since they are not strictly required. This allows
5301                                 # more freedom in the merge order calculation for solving
5302                                 # circular dependencies. Don't convert to PDEPEND since that
5303                                 # could make --with-bdeps=y less effective if it is used to
5304                                 # adjust merge order to prevent built_with_use() calls from
5305                                 # failing.
5306                                 bdeps_optional = True
5307                         else:
5308                                 # built packages do not have build time dependencies.
5309                                 edepend["DEPEND"] = ""
5310
5311                 if removal_action and self.myopts.get("--with-bdeps", "y") == "n":
5312                         edepend["DEPEND"] = ""
5313
5314                 bdeps_root = "/"
5315                 root_deps = self.myopts.get("--root-deps")
5316                 if root_deps is not None:
5317                         if root_deps is True:
5318                                 bdeps_root = myroot
5319                         elif root_deps == "rdeps":
5320                                 edepend["DEPEND"] = ""
5321
5322                 deps = (
5323                         (bdeps_root, edepend["DEPEND"],
5324                                 self._priority(buildtime=(not bdeps_optional),
5325                                 optional=bdeps_optional)),
5326                         (myroot, edepend["RDEPEND"], self._priority(runtime=True)),
5327                         (myroot, edepend["PDEPEND"], self._priority(runtime_post=True))
5328                 )
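                # Editor's note (illustrative, not original code): with the defaults
                # above, the three (dep_root, dep_string, priority) entries are roughly:
                #
                #   ("/",    DEPEND,  buildtime priority)  # optional instead of buildtime
                #                                          # when --with-bdeps=y applied to
                #                                          # a built package
                #   (myroot, RDEPEND, runtime priority)
                #   (myroot, PDEPEND, runtime_post priority)
                #
                # --root-deps makes DEPEND apply to myroot instead of "/", and
                # --root-deps=rdeps drops DEPEND entirely (it was emptied above).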
5329
5330                 debug = "--debug" in self.myopts
5331                 strict = mytype != "installed"
5332                 try:
5333                         for dep_root, dep_string, dep_priority in deps:
5334                                 if not dep_string:
5335                                         continue
5336                                 if debug:
5337                                         print
5338                                         print "Parent:   ", jbigkey
5339                                         print "Depstring:", dep_string
5340                                         print "Priority:", dep_priority
5341                                 vardb = self.roots[dep_root].trees["vartree"].dbapi
5342                                 try:
5343                                         selected_atoms = self._select_atoms(dep_root,
5344                                                 dep_string, myuse=myuse, parent=pkg, strict=strict,
5345                                                 priority=dep_priority)
5346                                 except portage.exception.InvalidDependString, e:
5347                                         show_invalid_depstring_notice(jbigkey, dep_string, str(e))
5348                                         return 0
5349                                 if debug:
5350                                         print "Candidates:", selected_atoms
5351
5352                                 for atom in selected_atoms:
5353                                         try:
5354
5355                                                 atom = portage.dep.Atom(atom)
5356
5357                                                 mypriority = dep_priority.copy()
5358                                                 if not atom.blocker and vardb.match(atom):
5359                                                         mypriority.satisfied = True
5360
5361                                                 if not self._add_dep(Dependency(atom=atom,
5362                                                         blocker=atom.blocker, depth=depth, parent=pkg,
5363                                                         priority=mypriority, root=dep_root),
5364                                                         allow_unsatisfied=allow_unsatisfied):
5365                                                         return 0
5366
5367                                         except portage.exception.InvalidAtom, e:
5368                                                 show_invalid_depstring_notice(
5369                                                         pkg, dep_string, str(e))
5370                                                 del e
5371                                                 if not pkg.installed:
5372                                                         return 0
5373
5374                                 if debug:
5375                                         print "Exiting...", jbigkey
5376                 except portage.exception.AmbiguousPackageName, e:
5377                         pkgs = e.args[0]
5378                         portage.writemsg("\n\n!!! An atom in the dependencies " + \
5379                                 "is not fully-qualified. Multiple matches:\n\n", noiselevel=-1)
5380                         for cpv in pkgs:
5381                                 portage.writemsg("    %s\n" % cpv, noiselevel=-1)
5382                         portage.writemsg("\n", noiselevel=-1)
5383                         if mytype == "binary":
5384                                 portage.writemsg(
5385                                         "!!! This binary package cannot be installed: '%s'\n" % \
5386                                         mykey, noiselevel=-1)
5387                         elif mytype == "ebuild":
5388                                 portdb = self.roots[myroot].trees["porttree"].dbapi
5389                                 myebuild, mylocation = portdb.findname2(mykey)
5390                                 portage.writemsg("!!! This ebuild cannot be installed: " + \
5391                                         "'%s'\n" % myebuild, noiselevel=-1)
5392                         portage.writemsg("!!! Please notify the package maintainer " + \
5393                                 "that atoms must be fully-qualified.\n", noiselevel=-1)
5394                         return 0
5395                 return 1
5396
5397         def _priority(self, **kwargs):
5398                 if "remove" in self.myparams:
5399                         priority_constructor = UnmergeDepPriority
5400                 else:
5401                         priority_constructor = DepPriority
5402                 return priority_constructor(**kwargs)
5403
5404         def _dep_expand(self, root_config, atom_without_category):
5405                 """
5406                 @param root_config: a root config instance
5407                 @type root_config: RootConfig
5408                 @param atom_without_category: an atom without a category component
5409                 @type atom_without_category: String
5410                 @rtype: list
5411                 @returns: a list of atoms containing categories (possibly empty)
5412                 """
5413                 null_cp = portage.dep_getkey(insert_category_into_atom(
5414                         atom_without_category, "null"))
5415                 cat, atom_pn = portage.catsplit(null_cp)
5416
5417                 dbs = self._filtered_trees[root_config.root]["dbs"]
5418                 categories = set()
5419                 for db, pkg_type, built, installed, db_keys in dbs:
5420                         for cat in db.categories:
5421                                 if db.cp_list("%s/%s" % (cat, atom_pn)):
5422                                         categories.add(cat)
5423
5424                 deps = []
5425                 for cat in categories:
5426                         deps.append(insert_category_into_atom(
5427                                 atom_without_category, cat))
5428                 return deps
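                # Editor's note: hypothetical usage sketch (category and package names
                # invented, not original code):
                #
                #   self._dep_expand(root_config, "foo")
                #
                # scans every configured db for a "<cat>/foo" package and might return
                # something like ["dev-libs/foo", "net-misc/foo"], or [] when no
                # category provides a matching package.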
5429
5430         def _have_new_virt(self, root, atom_cp):
5431                 ret = False
5432                 for db, pkg_type, built, installed, db_keys in \
5433                         self._filtered_trees[root]["dbs"]:
5434                         if db.cp_list(atom_cp):
5435                                 ret = True
5436                                 break
5437                 return ret
5438
5439         def _iter_atoms_for_pkg(self, pkg):
5440                 # TODO: add multiple $ROOT support
5441                 if pkg.root != self.target_root:
5442                         return
5443                 atom_arg_map = self._atom_arg_map
5444                 root_config = self.roots[pkg.root]
5445                 for atom in self._set_atoms.iterAtomsForPackage(pkg):
5446                         atom_cp = portage.dep_getkey(atom)
5447                         if atom_cp != pkg.cp and \
5448                                 self._have_new_virt(pkg.root, atom_cp):
5449                                 continue
5450                         visible_pkgs = root_config.visible_pkgs.match_pkgs(atom)
5451                         visible_pkgs.reverse() # descending order
5452                         higher_slot = None
5453                         for visible_pkg in visible_pkgs:
5454                                 if visible_pkg.cp != atom_cp:
5455                                         continue
5456                                 if pkg >= visible_pkg:
5457                                         # This is descending order, and we're not
5458                                         # interested in any versions <= pkg given.
5459                                         break
5460                                 if pkg.slot_atom != visible_pkg.slot_atom:
5461                                         higher_slot = visible_pkg
5462                                         break
5463                         if higher_slot is not None:
5464                                 continue
5465                         for arg in atom_arg_map[(atom, pkg.root)]:
5466                                 if isinstance(arg, PackageArg) and \
5467                                         arg.package != pkg:
5468                                         continue
5469                                 yield arg, atom
5470
5471         def select_files(self, myfiles):
5472                 """Given a list of .tbz2s, .ebuilds, sets, and deps, create the
5473                 appropriate depgraph and return a favorite list."""
5474                 debug = "--debug" in self.myopts
5475                 root_config = self.roots[self.target_root]
5476                 sets = root_config.sets
5477                 getSetAtoms = root_config.setconfig.getSetAtoms
5478                 myfavorites=[]
5479                 myroot = self.target_root
5480                 dbs = self._filtered_trees[myroot]["dbs"]
5481                 vardb = self.trees[myroot]["vartree"].dbapi
5482                 real_vardb = self._trees_orig[myroot]["vartree"].dbapi
5483                 portdb = self.trees[myroot]["porttree"].dbapi
5484                 bindb = self.trees[myroot]["bintree"].dbapi
5485                 pkgsettings = self.pkgsettings[myroot]
5486                 args = []
5487                 onlydeps = "--onlydeps" in self.myopts
5488                 lookup_owners = []
5489                 for x in myfiles:
5490                         ext = os.path.splitext(x)[1]
5491                         if ext==".tbz2":
5492                                 if not os.path.exists(x):
5493                                         if os.path.exists(
5494                                                 os.path.join(pkgsettings["PKGDIR"], "All", x)):
5495                                                 x = os.path.join(pkgsettings["PKGDIR"], "All", x)
5496                                         elif os.path.exists(
5497                                                 os.path.join(pkgsettings["PKGDIR"], x)):
5498                                                 x = os.path.join(pkgsettings["PKGDIR"], x)
5499                                         else:
5500                                                 print "\n\n!!! Binary package '"+str(x)+"' does not exist."
5501                                                 print "!!! Please ensure the tbz2 exists as specified.\n"
5502                                                 return 0, myfavorites
5503                                 mytbz2=portage.xpak.tbz2(x)
5504                                 mykey=mytbz2.getelements("CATEGORY")[0]+"/"+os.path.splitext(os.path.basename(x))[0]
5505                                 if os.path.realpath(x) != \
5506                                         os.path.realpath(self.trees[myroot]["bintree"].getname(mykey)):
5507                                         print colorize("BAD", "\n*** You need to adjust PKGDIR to emerge this package.\n")
5508                                         return 0, myfavorites
5509                                 db_keys = list(bindb._aux_cache_keys)
5510                                 metadata = izip(db_keys, bindb.aux_get(mykey, db_keys))
5511                                 pkg = Package(type_name="binary", root_config=root_config,
5512                                         cpv=mykey, built=True, metadata=metadata,
5513                                         onlydeps=onlydeps)
5514                                 self._pkg_cache[pkg] = pkg
5515                                 args.append(PackageArg(arg=x, package=pkg,
5516                                         root_config=root_config))
5517                         elif ext==".ebuild":
5518                                 ebuild_path = portage.util.normalize_path(os.path.abspath(x))
5519                                 pkgdir = os.path.dirname(ebuild_path)
5520                                 tree_root = os.path.dirname(os.path.dirname(pkgdir))
5521                                 cp = pkgdir[len(tree_root)+1:]
5522                                 e = portage.exception.PackageNotFound(
5523                                         ("%s is not in a valid portage tree " + \
5524                                         "hierarchy or does not exist") % x)
5525                                 if not portage.isvalidatom(cp):
5526                                         raise e
5527                                 cat = portage.catsplit(cp)[0]
5528                                 mykey = cat + "/" + os.path.basename(ebuild_path[:-7])
5529                                 if not portage.isvalidatom("="+mykey):
5530                                         raise e
5531                                 ebuild_path = portdb.findname(mykey)
5532                                 if ebuild_path:
5533                                         if ebuild_path != os.path.join(os.path.realpath(tree_root),
5534                                                 cp, os.path.basename(ebuild_path)):
5535                                                 print colorize("BAD", "\n*** You need to adjust PORTDIR or PORTDIR_OVERLAY to emerge this package.\n")
5536                                                 return 0, myfavorites
5537                                         if mykey not in portdb.xmatch(
5538                                                 "match-visible", portage.dep_getkey(mykey)):
5539                                                 print colorize("BAD", "\n*** You are emerging a masked package. It is MUCH better to use")
5540                                                 print colorize("BAD", "*** /etc/portage/package.* to accomplish this. See portage(5) man")
5541                                                 print colorize("BAD", "*** page for details.")
5542                                                 countdown(int(self.settings["EMERGE_WARNING_DELAY"]),
5543                                                         "Continuing...")
5544                                 else:
5545                                         raise portage.exception.PackageNotFound(
5546                                                 "%s is not in a valid portage tree hierarchy or does not exist" % x)
5547                                 db_keys = list(portdb._aux_cache_keys)
5548                                 metadata = izip(db_keys, portdb.aux_get(mykey, db_keys))
5549                                 pkg = Package(type_name="ebuild", root_config=root_config,
5550                                         cpv=mykey, metadata=metadata, onlydeps=onlydeps)
5551                                 pkgsettings.setcpv(pkg)
5552                                 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
5553                                 pkg.metadata['CHOST'] = pkgsettings.get('CHOST', '')
5554                                 self._pkg_cache[pkg] = pkg
5555                                 args.append(PackageArg(arg=x, package=pkg,
5556                                         root_config=root_config))
5557                         elif x.startswith(os.path.sep):
5558                                 if not x.startswith(myroot):
5559                                         portage.writemsg(("\n\n!!! '%s' does not start with" + \
5560                                                 " $ROOT.\n") % x, noiselevel=-1)
5561                                         return 0, []
5562                                 # Queue these up since it's most efficient to handle
5563                                 # multiple files in a single iter_owners() call.
5564                                 lookup_owners.append(x)
5565                         else:
5566                                 if x in ("system", "world"):
5567                                         x = SETPREFIX + x
5568                                 if x.startswith(SETPREFIX):
5569                                         s = x[len(SETPREFIX):]
5570                                         if s not in sets:
5571                                                 raise portage.exception.PackageSetNotFound(s)
5572                                         if s in self._sets:
5573                                                 continue
5574                                         # Recursively expand sets so that containment tests in
5575                                         # self._get_parent_sets() properly match atoms in nested
5576                                         # sets (like if world contains system).
5577                                         expanded_set = InternalPackageSet(
5578                                                 initial_atoms=getSetAtoms(s))
5579                                         self._sets[s] = expanded_set
5580                                         args.append(SetArg(arg=x, set=expanded_set,
5581                                                 root_config=root_config))
5582                                         continue
5583                                 if not is_valid_package_atom(x):
5584                                         portage.writemsg("\n\n!!! '%s' is not a valid package atom.\n" % x,
5585                                                 noiselevel=-1)
5586                                         portage.writemsg("!!! Please check ebuild(5) for full details.\n")
5587                                         portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
5588                                         return (0,[])
5589                                 # Don't expand categories or old-style virtuals here unless
5590                                 # necessary. Expansion of old-style virtuals here causes at
5591                                 # least the following problems:
5592                                 #   1) It's more difficult to determine which set(s) an atom
5593                                 #      came from, if any.
5594                                 #   2) It takes away freedom from the resolver to choose other
5595                                 #      possible expansions when necessary.
5596                                 if "/" in x:
5597                                         args.append(AtomArg(arg=x, atom=x,
5598                                                 root_config=root_config))
5599                                         continue
5600                                 expanded_atoms = self._dep_expand(root_config, x)
5601                                 installed_cp_set = set()
5602                                 for atom in expanded_atoms:
5603                                         atom_cp = portage.dep_getkey(atom)
5604                                         if vardb.cp_list(atom_cp):
5605                                                 installed_cp_set.add(atom_cp)
5606
5607                                 if len(installed_cp_set) > 1:
5608                                         non_virtual_cps = set()
5609                                         for atom_cp in installed_cp_set:
5610                                                 if not atom_cp.startswith("virtual/"):
5611                                                         non_virtual_cps.add(atom_cp)
5612                                         if len(non_virtual_cps) == 1:
5613                                                 installed_cp_set = non_virtual_cps
5614
5615                                 if len(expanded_atoms) > 1 and len(installed_cp_set) == 1:
5616                                         installed_cp = iter(installed_cp_set).next()
5617                                         expanded_atoms = [atom for atom in expanded_atoms \
5618                                                 if portage.dep_getkey(atom) == installed_cp]
5619
5620                                 if len(expanded_atoms) > 1:
5621                                         print
5622                                         print
5623                                         ambiguous_package_name(x, expanded_atoms, root_config,
5624                                                 self.spinner, self.myopts)
5625                                         return False, myfavorites
5626                                 if expanded_atoms:
5627                                         atom = expanded_atoms[0]
5628                                 else:
5629                                         null_atom = insert_category_into_atom(x, "null")
5630                                         null_cp = portage.dep_getkey(null_atom)
5631                                         cat, atom_pn = portage.catsplit(null_cp)
5632                                         virts_p = root_config.settings.get_virts_p().get(atom_pn)
5633                                         if virts_p:
5634                                                 # Allow the depgraph to choose which virtual.
5635                                                 atom = insert_category_into_atom(x, "virtual")
5636                                         else:
5637                                                 atom = insert_category_into_atom(x, "null")
5638
5639                                 args.append(AtomArg(arg=x, atom=atom,
5640                                         root_config=root_config))
5641
5642                 if lookup_owners:
5643                         relative_paths = []
5644                         search_for_multiple = False
5645                         if len(lookup_owners) > 1:
5646                                 search_for_multiple = True
5647
5648                         for x in lookup_owners:
5649                                 if not search_for_multiple and os.path.isdir(x):
5650                                         search_for_multiple = True
5651                                 relative_paths.append(x[len(myroot):])
5652
5653                         owners = set()
5654                         for pkg, relative_path in \
5655                                 real_vardb._owners.iter_owners(relative_paths):
5656                                 owners.add(pkg.mycpv)
5657                                 if not search_for_multiple:
5658                                         break
5659
5660                         if not owners:
5661                                 portage.writemsg(("\n\n!!! '%s' is not claimed " + \
5662                                         "by any package.\n") % lookup_owners[0], noiselevel=-1)
5663                                 return 0, []
5664
5665                         for cpv in owners:
5666                                 slot = vardb.aux_get(cpv, ["SLOT"])[0]
5667                                 if not slot:
5668                                         # Portage now masks packages with a missing SLOT, but it's
5669                                         # possible that one was installed by an older version.
5670                                         atom = portage.cpv_getkey(cpv)
5671                                 else:
5672                                         atom = "%s:%s" % (portage.cpv_getkey(cpv), slot)
5673                                 args.append(AtomArg(arg=atom, atom=atom,
5674                                         root_config=root_config))
5675
5676                 if "--update" in self.myopts:
5677                         # In some cases, the greedy slots behavior can pull in a slot that
5678                         # the user would want to uninstall due to it being blocked by a
5679                         # newer version in a different slot. Therefore, it's necessary to
5680                         # detect and discard any that should be uninstalled. Each time
5681                         # that arguments are updated, package selections are repeated in
5682                         # order to ensure consistency with the current arguments:
5683                         #
5684                         #  1) Initialize args
5685                         #  2) Select packages and generate initial greedy atoms
5686                         #  3) Update args with greedy atoms
5687                         #  4) Select packages and generate greedy atoms again, while
5688                         #     accounting for any blockers between selected packages
5689                         #  5) Update args with revised greedy atoms
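                             #
                             # Illustrative example (hypothetical atoms): for an argument like
                             # "postgresql" with two slots installed, step 2 adds a greedy atom
                             # for the older slot; if the newer selected slot blocks the older
                             # one, the blocker lookahead in step 4 discards that greedy atom,
                             # allowing the old slot to be uninstalled instead of pulled back in.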
5690
5691                         self._set_args(args)
5692                         greedy_args = []
5693                         for arg in args:
5694                                 greedy_args.append(arg)
5695                                 if not isinstance(arg, AtomArg):
5696                                         continue
5697                                 for atom in self._greedy_slots(arg.root_config, arg.atom):
5698                                         greedy_args.append(
5699                                                 AtomArg(arg=arg.arg, atom=atom,
5700                                                         root_config=arg.root_config))
5701
5702                         self._set_args(greedy_args)
5703                         del greedy_args
5704
5705                         # Revise greedy atoms, accounting for any blockers
5706                         # between selected packages.
5707                         revised_greedy_args = []
5708                         for arg in args:
5709                                 revised_greedy_args.append(arg)
5710                                 if not isinstance(arg, AtomArg):
5711                                         continue
5712                                 for atom in self._greedy_slots(arg.root_config, arg.atom,
5713                                         blocker_lookahead=True):
5714                                         revised_greedy_args.append(
5715                                                 AtomArg(arg=arg.arg, atom=atom,
5716                                                         root_config=arg.root_config))
5717                         args = revised_greedy_args
5718                         del revised_greedy_args
5719
5720                 self._set_args(args)
5721
5722                 myfavorites = set(myfavorites)
5723                 for arg in args:
5724                         if isinstance(arg, (AtomArg, PackageArg)):
5725                                 myfavorites.add(arg.atom)
5726                         elif isinstance(arg, SetArg):
5727                                 myfavorites.add(arg.arg)
5728                 myfavorites = list(myfavorites)
5729
5730                 pprovideddict = pkgsettings.pprovideddict
5731                 if debug:
5732                         portage.writemsg("\n", noiselevel=-1)
5733                 # Order needs to be preserved since a feature of --nodeps
5734                 # is to allow the user to force a specific merge order.
5735                 args.reverse()
5736                 while args:
5737                         arg = args.pop()
5738                         for atom in arg.set:
5739                                 self.spinner.update()
5740                                 dep = Dependency(atom=atom, onlydeps=onlydeps,
5741                                         root=myroot, parent=arg)
5742                                 atom_cp = portage.dep_getkey(atom)
5743                                 try:
5744                                         pprovided = pprovideddict.get(atom_cp)
5745                                         if pprovided and portage.match_from_list(atom, pprovided):
5746                                                 # A provided package has been specified on the command line.
5747                                                 self._pprovided_args.append((arg, atom))
5748                                                 continue
5749                                         if isinstance(arg, PackageArg):
5750                                                 if not self._add_pkg(arg.package, dep) or \
5751                                                         not self._create_graph():
5752                                                         sys.stderr.write(("\n\n!!! Problem resolving " + \
5753                                                                 "dependencies for %s\n") % arg.arg)
5754                                                         return 0, myfavorites
5755                                                 continue
5756                                         if debug:
5757                                                 portage.writemsg("      Arg: %s\n     Atom: %s\n" % \
5758                                                         (arg, atom), noiselevel=-1)
5759                                         pkg, existing_node = self._select_package(
5760                                                 myroot, atom, onlydeps=onlydeps)
5761                                         if not pkg:
5762                                                 if not (isinstance(arg, SetArg) and \
5763                                                         arg.name in ("system", "world")):
5764                                                         self._unsatisfied_deps_for_display.append(
5765                                                                 ((myroot, atom), {}))
5766                                                         return 0, myfavorites
5767                                                 self._missing_args.append((arg, atom))
5768                                                 continue
5769                                         if atom_cp != pkg.cp:
5770                                                 # For old-style virtuals, we need to repeat the
5771                                                 # package.provided check against the selected package.
5772                                                 expanded_atom = atom.replace(atom_cp, pkg.cp)
5773                                                 pprovided = pprovideddict.get(pkg.cp)
5774                                                 if pprovided and \
5775                                                         portage.match_from_list(expanded_atom, pprovided):
5776                                                         # A provided package has been
5777                                                         # specified on the command line.
5778                                                         self._pprovided_args.append((arg, atom))
5779                                                         continue
5780                                         if pkg.installed and "selective" not in self.myparams:
5781                                                 self._unsatisfied_deps_for_display.append(
5782                                                         ((myroot, atom), {}))
5783                                                 # Previous behavior was to bail out in this case, but
5784                                                 # since the dep is satisfied by the installed package,
5785                                                 # it's more friendly to continue building the graph
5786                                                 # and just show a warning message. Therefore, only bail
5787                                                 # out here if the atom is not from either the system or
5788                                                 # world set.
5789                                                 if not (isinstance(arg, SetArg) and \
5790                                                         arg.name in ("system", "world")):
5791                                                         return 0, myfavorites
5792
5793                                         # Add the selected package to the graph as soon as possible
5794                                         # so that later dep_check() calls can use it as feedback
5795                                         # for making more consistent atom selections.
5796                                         if not self._add_pkg(pkg, dep):
5797                                                 if isinstance(arg, SetArg):
5798                                                         sys.stderr.write(("\n\n!!! Problem resolving " + \
5799                                                                 "dependencies for %s from %s\n") % \
5800                                                                 (atom, arg.arg))
5801                                                 else:
5802                                                         sys.stderr.write(("\n\n!!! Problem resolving " + \
5803                                                                 "dependencies for %s\n") % atom)
5804                                                 return 0, myfavorites
5805
5806                                 except portage.exception.MissingSignature, e:
5807                                         portage.writemsg("\n\n!!! A missing gpg signature is preventing portage from calculating the\n")
5808                                         portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
5809                                         portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
5810                                         portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
5811                                         portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
5812                                         return 0, myfavorites
5813                                 except portage.exception.InvalidSignature, e:
5814                                         portage.writemsg("\n\n!!! An invalid gpg signature is preventing portage from calculating the\n")
5815                                         portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
5816                                         portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
5817                                         portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
5818                                         portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
5819                                         return 0, myfavorites
5820                                 except SystemExit, e:
5821                                         raise # Re-raise so the generic Exception clause below doesn't report SystemExit as an error
5822                                 except Exception, e:
5823                                         print >> sys.stderr, "\n\n!!! Problem in '%s' dependencies." % atom
5824                                         print >> sys.stderr, "!!!", str(e), getattr(e, "__module__", None)
5825                                         raise
5826
5827                 # Now that the root packages have been added to the graph,
5828                 # process the dependencies.
5829                 if not self._create_graph():
5830                         return 0, myfavorites
5831
5832                 missing=0
5833                 if "--usepkgonly" in self.myopts:
5834                         for xs in self.digraph.all_nodes():
5835                                 if not isinstance(xs, Package):
5836                                         continue
5837                                 if len(xs) >= 4 and xs[0] != "binary" and xs[3] == "merge":
5838                                         if missing == 0:
5839                                                 print
5840                                         missing += 1
5841                                         print "Missing binary for:",xs[2]
5842
5843                 try:
5844                         self.altlist()
5845                 except self._unknown_internal_error:
5846                         return False, myfavorites
5847
5848                 # The return value is true unless we are missing binaries.
5849                 return (not missing, myfavorites)
5850
5851         def _set_args(self, args):
5852                 """
5853                 Create the "args" package set from atoms and packages given as
5854                 arguments. This method can be called multiple times if necessary.
5855                 The package selection cache is automatically invalidated, since
5856                 arguments influence package selections.
5857                 """
5858                 args_set = self._sets["args"]
5859                 args_set.clear()
5860                 for arg in args:
5861                         if not isinstance(arg, (AtomArg, PackageArg)):
5862                                 continue
5863                         atom = arg.atom
5864                         if atom in args_set:
5865                                 continue
5866                         args_set.add(atom)
5867
5868                 self._set_atoms.clear()
5869                 self._set_atoms.update(chain(*self._sets.itervalues()))
5870                 atom_arg_map = self._atom_arg_map
5871                 atom_arg_map.clear()
5872                 for arg in args:
5873                         for atom in arg.set:
5874                                 atom_key = (atom, arg.root_config.root)
5875                                 refs = atom_arg_map.get(atom_key)
5876                                 if refs is None:
5877                                         refs = []
5878                                         atom_arg_map[atom_key] = refs
5879                                 if arg not in refs:
5880                                         refs.append(arg)
5881
5882                 # Invalidate the package selection cache, since
5883                 # arguments influence package selections.
5884                 self._highest_pkg_cache.clear()
5885                 for trees in self._filtered_trees.itervalues():
5886                         trees["porttree"].dbapi._clear_cache()
5887
5888         def _greedy_slots(self, root_config, atom, blocker_lookahead=False):
5889                 """
5890                 Return a list of slot atoms corresponding to installed slots that
5891                 differ from the slot of the highest visible match. When
5892                 blocker_lookahead is True, slot atoms that would trigger a blocker
5893                 conflict are automatically discarded, potentially allowing automatic
5894                 uninstallation of older slots when appropriate.
5895                 """
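                     # Illustrative example (hypothetical package): if sys-devel/gcc:4.3
                     # is the highest visible match and slot 4.1 is also installed, this
                     # returns [Atom("sys-devel/gcc:4.1")], unless blocker_lookahead is
                     # enabled and one of the two packages blocks the other.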
5896                 highest_pkg, in_graph = self._select_package(root_config.root, atom)
5897                 if highest_pkg is None:
5898                         return []
5899                 vardb = root_config.trees["vartree"].dbapi
5900                 slots = set()
5901                 for cpv in vardb.match(atom):
5902                         # don't mix new virtuals with old virtuals
5903                         if portage.cpv_getkey(cpv) == highest_pkg.cp:
5904                                 slots.add(vardb.aux_get(cpv, ["SLOT"])[0])
5905
5906                 slots.add(highest_pkg.metadata["SLOT"])
5907                 if len(slots) == 1:
5908                         return []
5909                 greedy_pkgs = []
5910                 slots.remove(highest_pkg.metadata["SLOT"])
5911                 while slots:
5912                         slot = slots.pop()
5913                         slot_atom = portage.dep.Atom("%s:%s" % (highest_pkg.cp, slot))
5914                         pkg, in_graph = self._select_package(root_config.root, slot_atom)
5915                         if pkg is not None and \
5916                                 pkg.cp == highest_pkg.cp and pkg < highest_pkg:
5917                                 greedy_pkgs.append(pkg)
5918                 if not greedy_pkgs:
5919                         return []
5920                 if not blocker_lookahead:
5921                         return [pkg.slot_atom for pkg in greedy_pkgs]
5922
5923                 blockers = {}
5924                 blocker_dep_keys = ["DEPEND", "PDEPEND", "RDEPEND"]
5925                 for pkg in greedy_pkgs + [highest_pkg]:
5926                         dep_str = " ".join(pkg.metadata[k] for k in blocker_dep_keys)
5927                         try:
5928                                 atoms = self._select_atoms(
5929                                         pkg.root, dep_str, pkg.use.enabled,
5930                                         parent=pkg, strict=True)
5931                         except portage.exception.InvalidDependString:
5932                                 continue
5933                         blocker_atoms = (x for x in atoms if x.blocker)
5934                         blockers[pkg] = InternalPackageSet(initial_atoms=blocker_atoms)
5935
5936                 if highest_pkg not in blockers:
5937                         return []
5938
5939                 # filter packages with invalid deps
5940                 greedy_pkgs = [pkg for pkg in greedy_pkgs if pkg in blockers]
5941
5942                 # filter packages that conflict with highest_pkg
5943                 greedy_pkgs = [pkg for pkg in greedy_pkgs if not \
5944                         (blockers[highest_pkg].findAtomForPackage(pkg) or \
5945                         blockers[pkg].findAtomForPackage(highest_pkg))]
5946
5947                 if not greedy_pkgs:
5948                         return []
5949
5950                 # If two packages conflict, discard the lower version.
5951                 discard_pkgs = set()
5952                 greedy_pkgs.sort(reverse=True)
5953                 for i in xrange(len(greedy_pkgs) - 1):
5954                         pkg1 = greedy_pkgs[i]
5955                         if pkg1 in discard_pkgs:
5956                                 continue
5957                         for j in xrange(i + 1, len(greedy_pkgs)):
5958                                 pkg2 = greedy_pkgs[j]
5959                                 if pkg2 in discard_pkgs:
5960                                         continue
5961                                 if blockers[pkg1].findAtomForPackage(pkg2) or \
5962                                         blockers[pkg2].findAtomForPackage(pkg1):
5963                                         # pkg1 > pkg2
5964                                         discard_pkgs.add(pkg2)
5965
5966                 return [pkg.slot_atom for pkg in greedy_pkgs \
5967                         if pkg not in discard_pkgs]
5968
5969         def _select_atoms_from_graph(self, *pargs, **kwargs):
5970                 """
5971                 Prefer atoms matching packages that have already been
5972                 added to the graph or those that are installed and have
5973                 not been scheduled for replacement.
5974                 """
5975                 kwargs["trees"] = self._graph_trees
5976                 return self._select_atoms_highest_available(*pargs, **kwargs)
5977
5978         def _select_atoms_highest_available(self, root, depstring,
5979                 myuse=None, parent=None, strict=True, trees=None, priority=None):
5980                 """This will raise InvalidDependString if necessary. If trees is
5981                 None then self._filtered_trees is used."""
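                     # Note: portage.dep_check() returns a 2-element sequence whose first
                     # element is a success flag; on success the second element is the
                     # list of selected atoms, otherwise it describes the error (see the
                     # mycheck handling below).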
5982                 pkgsettings = self.pkgsettings[root]
5983                 if trees is None:
5984                         trees = self._filtered_trees
5985                 if not getattr(priority, "buildtime", False):
5986                         # The parent should only be passed to dep_check() for buildtime
5987                         # dependencies since that's the only case when it's appropriate
5988                         # to trigger the circular dependency avoidance code which uses it.
5989                         # It's important not to trigger the same circular dependency
5990                         # avoidance code for runtime dependencies since it's not needed
5991                         # and it can promote an incorrect package choice.
5992                         parent = None
5993                 if True:
5994                         try:
5995                                 if parent is not None:
5996                                         trees[root]["parent"] = parent
5997                                 if not strict:
5998                                         portage.dep._dep_check_strict = False
5999                                 mycheck = portage.dep_check(depstring, None,
6000                                         pkgsettings, myuse=myuse,
6001                                         myroot=root, trees=trees)
6002                         finally:
6003                                 if parent is not None:
6004                                         trees[root].pop("parent")
6005                                 portage.dep._dep_check_strict = True
6006                         if not mycheck[0]:
6007                                 raise portage.exception.InvalidDependString(mycheck[1])
6008                         selected_atoms = mycheck[1]
6009                 return selected_atoms
6010
6011         def _show_unsatisfied_dep(self, root, atom, myparent=None, arg=None):
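                     """
                     Display the reasons why the given atom could not be satisfied for
                     the given root: masked candidate packages, missing or mismatched
                     USE flags, and the chain of parents/arguments that pulled the atom
                     into the dependency graph.
                     """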
6012                 atom = portage.dep.Atom(atom)
6013                 atom_set = InternalPackageSet(initial_atoms=(atom,))
6014                 atom_without_use = atom
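                     # Strip any USE dependency from the atom while preserving its slot,
                     # e.g. (illustrative) "dev-libs/foo:2[ssl]" becomes "dev-libs/foo:2",
                     # so that candidates can be matched below regardless of USE state.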
6015                 if atom.use:
6016                         atom_without_use = portage.dep.remove_slot(atom)
6017                         if atom.slot:
6018                                 atom_without_use += ":" + atom.slot
6019                         atom_without_use = portage.dep.Atom(atom_without_use)
6020                 xinfo = '"%s"' % atom
6021                 if arg:
6022                         xinfo='"%s"' % arg
6023                 # Discard null/ from failed cpv_expand category expansion.
6024                 xinfo = xinfo.replace("null/", "")
6025                 masked_packages = []
6026                 missing_use = []
6027                 masked_pkg_instances = set()
6028                 missing_licenses = []
6029                 have_eapi_mask = False
6030                 pkgsettings = self.pkgsettings[root]
6031                 implicit_iuse = pkgsettings._get_implicit_iuse()
6032                 root_config = self.roots[root]
6033                 portdb = self.roots[root].trees["porttree"].dbapi
6034                 dbs = self._filtered_trees[root]["dbs"]
6035                 for db, pkg_type, built, installed, db_keys in dbs:
6036                         if installed:
6037                                 continue
6038                         match = db.match
6039                         if hasattr(db, "xmatch"):
6040                                 cpv_list = db.xmatch("match-all", atom_without_use)
6041                         else:
6042                                 cpv_list = db.match(atom_without_use)
6043                         # descending order
6044                         cpv_list.reverse()
6045                         for cpv in cpv_list:
6046                                 metadata, mreasons  = get_mask_info(root_config, cpv,
6047                                         pkgsettings, db, pkg_type, built, installed, db_keys)
6048                                 if metadata is not None:
6049                                         pkg = Package(built=built, cpv=cpv,
6050                                                 installed=installed, metadata=metadata,
6051                                                 root_config=root_config)
6052                                         if pkg.cp != atom.cp:
6053                                                 # A cpv can be returned from dbapi.match() as an
6054                                                 # old-style virtual match even in cases when the
6055                                                 # package does not actually PROVIDE the virtual.
6056                                                 # Filter out any such false matches here.
6057                                                 if not atom_set.findAtomForPackage(pkg):
6058                                                         continue
6059                                         if mreasons:
6060                                                 masked_pkg_instances.add(pkg)
6061                                         if atom.use:
6062                                                 missing_use.append(pkg)
6063                                                 if not mreasons:
6064                                                         continue
6065                                 masked_packages.append(
6066                                         (root_config, pkgsettings, cpv, metadata, mreasons))
6067
6068                 missing_use_reasons = []
6069                 missing_iuse_reasons = []
6070                 for pkg in missing_use:
6071                         use = pkg.use.enabled
6072                         iuse = implicit_iuse.union(re.escape(x) for x in pkg.iuse.all)
6073                         iuse_re = re.compile("^(%s)$" % "|".join(iuse))
6074                         missing_iuse = []
6075                         for x in atom.use.required:
6076                                 if iuse_re.match(x) is None:
6077                                         missing_iuse.append(x)
6078                         mreasons = []
6079                         if missing_iuse:
6080                                 mreasons.append("Missing IUSE: %s" % " ".join(missing_iuse))
6081                                 missing_iuse_reasons.append((pkg, mreasons))
6082                         else:
6083                                 need_enable = sorted(atom.use.enabled.difference(use))
6084                                 need_disable = sorted(atom.use.disabled.intersection(use))
6085                                 if need_enable or need_disable:
6086                                         changes = []
6087                                         changes.extend(colorize("red", "+" + x) \
6088                                                 for x in need_enable)
6089                                         changes.extend(colorize("blue", "-" + x) \
6090                                                 for x in need_disable)
6091                                         mreasons.append("Change USE: %s" % " ".join(changes))
6092                                         missing_use_reasons.append((pkg, mreasons))
6093
6094                 unmasked_use_reasons = [(pkg, mreasons) for (pkg, mreasons) \
6095                         in missing_use_reasons if pkg not in masked_pkg_instances]
6096
6097                 unmasked_iuse_reasons = [(pkg, mreasons) for (pkg, mreasons) \
6098                         in missing_iuse_reasons if pkg not in masked_pkg_instances]
6099
6100                 show_missing_use = False
6101                 if unmasked_use_reasons:
6102                         # Only show the latest version.
6103                         show_missing_use = unmasked_use_reasons[:1]
6104                 elif unmasked_iuse_reasons:
6105                         if missing_use_reasons:
6106                                 # All packages with required IUSE are masked,
6107                                 # so display a normal masking message.
6108                                 pass
6109                         else:
6110                                 show_missing_use = unmasked_iuse_reasons
6111
6112                 if show_missing_use:
6113                         print "\nemerge: there are no ebuilds built with USE flags to satisfy "+green(xinfo)+"."
6114                         print "!!! One of the following packages is required to complete your request:"
6115                         for pkg, mreasons in show_missing_use:
6116                                 print "- "+pkg.cpv+" ("+", ".join(mreasons)+")"
6117
6118                 elif masked_packages:
6119                         print "\n!!! " + \
6120                                 colorize("BAD", "All ebuilds that could satisfy ") + \
6121                                 colorize("INFORM", xinfo) + \
6122                                 colorize("BAD", " have been masked.")
6123                         print "!!! One of the following masked packages is required to complete your request:"
6124                         have_eapi_mask = show_masked_packages(masked_packages)
6125                         if have_eapi_mask:
6126                                 print
6127                                 msg = ("The current version of portage supports " + \
6128                                         "EAPI '%s'. You must upgrade to a newer version" + \
6129                                         " of portage before EAPI masked packages can" + \
6130                                         " be installed.") % portage.const.EAPI
6131                                 from textwrap import wrap
6132                                 for line in wrap(msg, 75):
6133                                         print line
6134                         print
6135                         show_mask_docs()
6136                 else:
6137                         print "\nemerge: there are no ebuilds to satisfy "+green(xinfo)+"."
6138
6139                 # Show parent nodes and the argument that pulled them in.
6140                 traversed_nodes = set()
6141                 node = myparent
6142                 msg = []
6143                 while node is not None:
6144                         traversed_nodes.add(node)
6145                         msg.append('(dependency required by "%s" [%s])' % \
6146                                 (colorize('INFORM', str(node.cpv)), node.type_name))
6147                         # When traversing to parents, prefer arguments over packages
6148                         # since arguments are root nodes. Never traverse the same
6149                         # package twice, in order to prevent an infinite loop.
6150                         selected_parent = None
6151                         for parent in self.digraph.parent_nodes(node):
6152                                 if isinstance(parent, DependencyArg):
6153                                         msg.append('(dependency required by "%s" [argument])' % \
6154                                                 (colorize('INFORM', str(parent))))
6155                                         selected_parent = None
6156                                         break
6157                                 if parent not in traversed_nodes:
6158                                         selected_parent = parent
6159                         node = selected_parent
6160                 for line in msg:
6161                         print line
6162
6163                 print
6164
6165         def _select_pkg_highest_available(self, root, atom, onlydeps=False):
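                     """
                     Memoizing wrapper around _select_pkg_highest_available_imp().
                     Results are cached by (root, atom, onlydeps), and visible matches
                     are injected into root_config.visible_pkgs (installed packages
                     with missing KEYWORDS are excluded from injection).
                     """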
6166                 cache_key = (root, atom, onlydeps)
6167                 ret = self._highest_pkg_cache.get(cache_key)
6168                 if ret is not None:
6169                         pkg, existing = ret
6170                         if pkg and not existing:
6171                                 existing = self._slot_pkg_map[root].get(pkg.slot_atom)
6172                                 if existing and existing == pkg:
6173                                         # Update the cache to reflect that the
6174                                         # package has been added to the graph.
6175                                         ret = pkg, pkg
6176                                         self._highest_pkg_cache[cache_key] = ret
6177                         return ret
6178                 ret = self._select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps)
6179                 self._highest_pkg_cache[cache_key] = ret
6180                 pkg, existing = ret
6181                 if pkg is not None:
6182                         settings = pkg.root_config.settings
6183                         if visible(settings, pkg) and not (pkg.installed and \
6184                                 settings._getMissingKeywords(pkg.cpv, pkg.metadata)):
6185                                 pkg.root_config.visible_pkgs.cpv_inject(pkg)
6186                 return ret
6187
6188         def _select_pkg_highest_available_imp(self, root, atom, onlydeps=False):
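                     """
                     Iterate over the configured databases (ebuild, binary and
                     installed, in order of type preference) and return a
                     (pkg, existing_node) tuple for the highest matching package,
                     or (None, None) when nothing acceptable is found.
                     """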
6189                 root_config = self.roots[root]
6190                 pkgsettings = self.pkgsettings[root]
6191                 dbs = self._filtered_trees[root]["dbs"]
6192                 vardb = self.roots[root].trees["vartree"].dbapi
6193                 portdb = self.roots[root].trees["porttree"].dbapi
6194                 # List of acceptable packages, ordered by type preference.
6195                 matched_packages = []
6196                 highest_version = None
6197                 if not isinstance(atom, portage.dep.Atom):
6198                         atom = portage.dep.Atom(atom)
6199                 atom_cp = atom.cp
6200                 atom_set = InternalPackageSet(initial_atoms=(atom,))
6201                 existing_node = None
6202                 myeb = None
6203                 usepkgonly = "--usepkgonly" in self.myopts
6204                 empty = "empty" in self.myparams
6205                 selective = "selective" in self.myparams
6206                 reinstall = False
6207                 noreplace = "--noreplace" in self.myopts
6208                 # Behavior of the "selective" parameter depends on
6209                 # whether or not a package matches an argument atom.
6210                 # If an installed package provides an old-style
6211                 # virtual that is no longer provided by an available
6212                 # package, the installed package may match an argument
6213                 # atom even though none of the available packages do.
6214                 # Therefore, "selective" logic does not consider
6215                 # whether or not an installed package matches an
6216                 # argument atom. It only considers whether or not
6217                 # available packages match argument atoms, which is
6218                 # represented by the found_available_arg flag.
6219                 found_available_arg = False
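                     # Illustrative example (hypothetical): an installed package that
                     # still PROVIDEs an old-style virtual can match a "virtual/..."
                     # argument even when no available ebuild does; since only available
                     # matches set found_available_arg, such an installed-only match does
                     # not by itself trigger the want_reinstall logic below.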
6220                 for find_existing_node in True, False:
6221                         if existing_node:
6222                                 break
6223                         for db, pkg_type, built, installed, db_keys in dbs:
6224                                 if existing_node:
6225                                         break
6226                                 if installed and not find_existing_node:
6227                                         want_reinstall = reinstall or empty or \
6228                                                 (found_available_arg and not selective)
6229                                         if want_reinstall and matched_packages:
6230                                                 continue
6231                                 if hasattr(db, "xmatch"):
6232                                         cpv_list = db.xmatch("match-all", atom)
6233                                 else:
6234                                         cpv_list = db.match(atom)
6235
6236                                 # USE=multislot can make an installed package appear as if
6237                                 # it doesn't satisfy a slot dependency. Rebuilding the ebuild
6238                                 # won't do any good as long as USE=multislot is enabled since
6239                                 # the newly built package still won't have the expected slot.
6240                                 # Therefore, assume that such SLOT dependencies are already
6241                                 # satisfied rather than forcing a rebuild.
6242                                 if installed and not cpv_list and atom.slot:
6243                                         for cpv in db.match(atom.cp):
6244                                                 slot_available = False
6245                                                 for other_db, other_type, other_built, \
6246                                                         other_installed, other_keys in dbs:
6247                                                         try:
6248                                                                 if atom.slot == \
6249                                                                         other_db.aux_get(cpv, ["SLOT"])[0]:
6250                                                                         slot_available = True
6251                                                                         break
6252                                                         except KeyError:
6253                                                                 pass
6254                                                 if not slot_available:
6255                                                         continue
6256                                                 inst_pkg = self._pkg(cpv, "installed",
6257                                                         root_config, installed=installed)
6258                                                 # Remove the slot from the atom and verify that
6259                                                 # the package matches the resulting atom.
6260                                                 atom_without_slot = portage.dep.remove_slot(atom)
6261                                                 if atom.use:
6262                                                         atom_without_slot += str(atom.use)
6263                                                 atom_without_slot = portage.dep.Atom(atom_without_slot)
6264                                                 if portage.match_from_list(
6265                                                         atom_without_slot, [inst_pkg]):
6266                                                         cpv_list = [inst_pkg.cpv]
6267                                                 break
6268
6269                                 if not cpv_list:
6270                                         continue
6271                                 pkg_status = "merge"
6272                                 if installed or onlydeps:
6273                                         pkg_status = "nomerge"
6274                                 # descending order
6275                                 cpv_list.reverse()
6276                                 for cpv in cpv_list:
6277                                         # Make --noreplace take precedence over --newuse.
6278                                         if not installed and noreplace and \
6279                                                 cpv in vardb.match(atom):
6280                                                 # If the installed version is masked, it may
6281                                                 # be necessary to look at lower versions,
6282                                                 # in case there is a visible downgrade.
6283                                                 continue
6284                                         reinstall_for_flags = None
6285                                         cache_key = (pkg_type, root, cpv, pkg_status)
6286                                         calculated_use = True
6287                                         pkg = self._pkg_cache.get(cache_key)
6288                                         if pkg is None:
6289                                                 calculated_use = False
6290                                                 try:
6291                                                         metadata = izip(db_keys, db.aux_get(cpv, db_keys))
6292                                                 except KeyError:
6293                                                         continue
6294                                                 pkg = Package(built=built, cpv=cpv,
6295                                                         installed=installed, metadata=metadata,
6296                                                         onlydeps=onlydeps, root_config=root_config,
6297                                                         type_name=pkg_type)
6298                                                 metadata = pkg.metadata
6299                                                 if not built:
6300                                                         metadata['CHOST'] = pkgsettings.get('CHOST', '')
6301                                                 if not built and ("?" in metadata["LICENSE"] or \
6302                                                         "?" in metadata["PROVIDE"]):
6303                                                         # This is avoided whenever possible because
6304                                                         # it's expensive. It only needs to be done here
6305                                                         # if it has an effect on visibility.
6306                                                         pkgsettings.setcpv(pkg)
6307                                                         metadata["USE"] = pkgsettings["PORTAGE_USE"]
6308                                                         calculated_use = True
6309                                                 self._pkg_cache[pkg] = pkg
6310
6311                                         if not installed or (built and matched_packages):
6312                                                 # Only enforce visibility on installed packages
6313                                                 # if there is at least one other visible package
6314                                                 # available. By filtering installed masked packages
6315                                                 # here, packages that have been masked since they
6316                                                 # were installed can be automatically downgraded
6317                                                 # to an unmasked version.
6318                                                 try:
6319                                                         if not visible(pkgsettings, pkg):
6320                                                                 continue
6321                                                 except portage.exception.InvalidDependString:
6322                                                         if not installed:
6323                                                                 continue
6324
6325                                                 # Enable upgrade or downgrade to a version
6326                                                 # with visible KEYWORDS when the installed
6327                                                 # version is masked by KEYWORDS, but never
6328                                                 # reinstall the same exact version only due
6329                                                 # to a KEYWORDS mask.
6330                                                 if built and matched_packages:
6331
6332                                                         different_version = None
6333                                                         for avail_pkg in matched_packages:
6334                                                                 if not portage.dep.cpvequal(
6335                                                                         pkg.cpv, avail_pkg.cpv):
6336                                                                         different_version = avail_pkg
6337                                                                         break
6338                                                         if different_version is not None:
6339
6340                                                                 if installed and \
6341                                                                         pkgsettings._getMissingKeywords(
6342                                                                         pkg.cpv, pkg.metadata):
6343                                                                         continue
6344
6345                                                                         # If the ebuild no longer exists or its
6346                                                                 # keywords have been dropped, reject built
6347                                                                 # instances (installed or binary).
6348                                                                 # If --usepkgonly is enabled, assume that
6349                                                                 # the ebuild status should be ignored.
6350                                                                 if not usepkgonly:
6351                                                                         try:
6352                                                                                 pkg_eb = self._pkg(
6353                                                                                         pkg.cpv, "ebuild", root_config)
6354                                                                         except portage.exception.PackageNotFound:
6355                                                                                 continue
6356                                                                         else:
6357                                                                                 if not visible(pkgsettings, pkg_eb):
6358                                                                                         continue
6359
6360                                         if not pkg.built and not calculated_use:
6361                                                 # This is avoided whenever possible because
6362                                                 # it's expensive.
6363                                                 pkgsettings.setcpv(pkg)
6364                                                 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
6365
6366                                         if pkg.cp != atom.cp:
6367                                                 # A cpv can be returned from dbapi.match() as an
6368                                                 # old-style virtual match even in cases when the
6369                                                 # package does not actually PROVIDE the virtual.
6370                                                 # Filter out any such false matches here.
6371                                                 if not atom_set.findAtomForPackage(pkg):
6372                                                         continue
6373
6374                                         myarg = None
6375                                         if root == self.target_root:
6376                                                 try:
6377                                                         # Ebuild USE must have been calculated prior
6378                                                         # to this point, in case atoms have USE deps.
6379                                                         myarg = self._iter_atoms_for_pkg(pkg).next()
6380                                                 except StopIteration:
6381                                                         pass
6382                                                 except portage.exception.InvalidDependString:
6383                                                         if not installed:
6384                                                                 # masked by corruption
6385                                                                 continue
6386                                         if not installed and myarg:
6387                                                 found_available_arg = True
6388
6389                                         if atom.use and not pkg.built:
6390                                                 use = pkg.use.enabled
6391                                                 if atom.use.enabled.difference(use):
6392                                                         continue
6393                                                 if atom.use.disabled.intersection(use):
6394                                                         continue
6395                                         if pkg.cp == atom_cp:
6396                                                 if highest_version is None:
6397                                                         highest_version = pkg
6398                                                 elif pkg > highest_version:
6399                                                         highest_version = pkg
6400                                         # At this point, we've found the highest visible
6401                                         # match from the current repo. Any lower versions
6402                                         # from this repo are ignored, so the loop will
6403                                         # always end with a break statement below this
6404                                         # point.
6405                                         if find_existing_node:
6406                                                 e_pkg = self._slot_pkg_map[root].get(pkg.slot_atom)
6407                                                 if not e_pkg:
6408                                                         break
6409                                                 if portage.dep.match_from_list(atom, [e_pkg]):
6410                                                         if highest_version and \
6411                                                                 e_pkg.cp == atom_cp and \
6412                                                                 e_pkg < highest_version and \
6413                                                                 e_pkg.slot_atom != highest_version.slot_atom:
6414                                                                 # There is a higher version available in a
6415                                                                 # different slot, so this existing node is
6416                                                                 # irrelevant.
6417                                                                 pass
6418                                                         else:
6419                                                                 matched_packages.append(e_pkg)
6420                                                                 existing_node = e_pkg
6421                                                 break
6422                                         # Compare built package to current config and
6423                                         # reject the built package if necessary.
6424                                         if built and not installed and \
6425                                                 ("--newuse" in self.myopts or \
6426                                                 "--reinstall" in self.myopts):
6427                                                 iuses = pkg.iuse.all
6428                                                 old_use = pkg.use.enabled
6429                                                 if myeb:
6430                                                         pkgsettings.setcpv(myeb)
6431                                                 else:
6432                                                         pkgsettings.setcpv(pkg)
6433                                                 now_use = pkgsettings["PORTAGE_USE"].split()
6434                                                 forced_flags = set()
6435                                                 forced_flags.update(pkgsettings.useforce)
6436                                                 forced_flags.update(pkgsettings.usemask)
6437                                                 cur_iuse = iuses
6438                                                 if myeb and not usepkgonly:
6439                                                         cur_iuse = myeb.iuse.all
6440                                                 if self._reinstall_for_flags(forced_flags,
6441                                                         old_use, iuses,
6442                                                         now_use, cur_iuse):
6443                                                         break
6444                                         # Compare current config to installed package
6445                                         # and do not reinstall if possible.
6446                                         if not installed and \
6447                                                 ("--newuse" in self.myopts or \
6448                                                 "--reinstall" in self.myopts) and \
6449                                                 cpv in vardb.match(atom):
6450                                                 pkgsettings.setcpv(pkg)
6451                                                 forced_flags = set()
6452                                                 forced_flags.update(pkgsettings.useforce)
6453                                                 forced_flags.update(pkgsettings.usemask)
6454                                                 old_use = vardb.aux_get(cpv, ["USE"])[0].split()
6455                                                 old_iuse = set(filter_iuse_defaults(
6456                                                         vardb.aux_get(cpv, ["IUSE"])[0].split()))
6457                                                 cur_use = pkg.use.enabled
6458                                                 cur_iuse = pkg.iuse.all
6459                                                 reinstall_for_flags = \
6460                                                         self._reinstall_for_flags(
6461                                                         forced_flags, old_use, old_iuse,
6462                                                         cur_use, cur_iuse)
6463                                                 if reinstall_for_flags:
6464                                                         reinstall = True
6465                                         if not built:
6466                                                 myeb = pkg
6467                                         matched_packages.append(pkg)
6468                                         if reinstall_for_flags:
6469                                                 self._reinstall_nodes[pkg] = \
6470                                                         reinstall_for_flags
6471                                         break
6472
6473                 if not matched_packages:
6474                         return None, None
6475
6476                 if "--debug" in self.myopts:
6477                         for pkg in matched_packages:
6478                                 portage.writemsg("%s %s\n" % \
6479                                         ((pkg.type_name + ":").rjust(10), pkg.cpv), noiselevel=-1)
6480
6481                 # Filter out any old-style virtual matches if they are
6482                 # mixed with new-style virtual matches.
6483                 cp = portage.dep_getkey(atom)
6484                 if len(matched_packages) > 1 and \
6485                         "virtual" == portage.catsplit(cp)[0]:
6486                         for pkg in matched_packages:
6487                                 if pkg.cp != cp:
6488                                         continue
6489                                 # Got a new-style virtual, so filter
6490                                 # out any old-style virtuals.
6491                                 matched_packages = [pkg for pkg in matched_packages \
6492                                         if pkg.cp == cp]
6493                                 break
6494
6495                 if len(matched_packages) > 1:
6496                         bestmatch = portage.best(
6497                                 [pkg.cpv for pkg in matched_packages])
6498                         matched_packages = [pkg for pkg in matched_packages \
6499                                 if portage.dep.cpvequal(pkg.cpv, bestmatch)]
6500
6501                 # ordered by type preference ("ebuild" type is the last resort)
6502                 return  matched_packages[-1], existing_node
6503
6504         def _select_pkg_from_graph(self, root, atom, onlydeps=False):
6505                 """
6506                 Select packages that have already been added to the graph or
6507                 those that are installed and have not been scheduled for
6508                 replacement.
6509                 """
6510                 graph_db = self._graph_trees[root]["porttree"].dbapi
6511                 matches = graph_db.match_pkgs(atom)
6512                 if not matches:
6513                         return None, None
6514                 pkg = matches[-1] # highest match
6515                 in_graph = self._slot_pkg_map[root].get(pkg.slot_atom)
6516                 return pkg, in_graph
6517
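        # Illustrative usage sketch for _select_pkg_from_graph() (assumed, not
        # part of the original code). The returned pair is consumed roughly
        # like this:
        #
        #     pkg, in_graph = self._select_pkg_from_graph(root, atom)
        #     if pkg is None:
        #             pass  # neither the graph nor installed packages satisfy the atom
        #     elif in_graph is not None:
        #             pass  # reuse the node already present for this slot
        #
        # Only _select_pkg_from_graph() itself comes from the code above; the
        # surrounding control flow is hypothetical.
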
6518         def _complete_graph(self):
6519                 """
6520                 Add any deep dependencies of required sets (args, system, world) that
6521                 have not been pulled into the graph yet. This ensures that the graph
6522                 is consistent such that initially satisfied deep dependencies are not
6523                 broken in the new graph. Initially unsatisfied dependencies are
6524                 irrelevant since we only want to avoid breaking dependencies that are
6525                 initially satisfied.
6526
6527                 Since this method can consume enough time to disturb users, it is
6528                 currently only enabled by the --complete-graph option.
6529                 """
6530                 if "--buildpkgonly" in self.myopts or \
6531                         "recurse" not in self.myparams:
6532                         return 1
6533
6534                 if "complete" not in self.myparams:
6535                         # Skip this to avoid consuming enough time to disturb users.
6536                         return 1
6537
6538                 # Put the depgraph into a mode that causes it to only
6539                 # select packages that have already been added to the
6540                 # graph or those that are installed and have not been
6541                 # scheduled for replacement. Also, toggle the "deep"
6542                 # parameter so that all dependencies are traversed and
6543                 # accounted for.
6544                 self._select_atoms = self._select_atoms_from_graph
6545                 self._select_package = self._select_pkg_from_graph
6546                 already_deep = "deep" in self.myparams
6547                 if not already_deep:
6548                         self.myparams.add("deep")
6549
6550                 for root in self.roots:
6551                         required_set_names = self._required_set_names.copy()
6552                         if root == self.target_root and \
6553                                 (already_deep or "empty" in self.myparams):
6554                                 required_set_names.difference_update(self._sets)
6555                         if not required_set_names and not self._ignored_deps:
6556                                 continue
6557                         root_config = self.roots[root]
6558                         setconfig = root_config.setconfig
6559                         args = []
6560                         # Reuse existing SetArg instances when available.
6561                         for arg in self.digraph.root_nodes():
6562                                 if not isinstance(arg, SetArg):
6563                                         continue
6564                                 if arg.root_config != root_config:
6565                                         continue
6566                                 if arg.name in required_set_names:
6567                                         args.append(arg)
6568                                         required_set_names.remove(arg.name)
6569                         # Create new SetArg instances only when necessary.
6570                         for s in required_set_names:
6571                                 expanded_set = InternalPackageSet(
6572                                         initial_atoms=setconfig.getSetAtoms(s))
6573                                 atom = SETPREFIX + s
6574                                 args.append(SetArg(arg=atom, set=expanded_set,
6575                                         root_config=root_config))
6576                         vardb = root_config.trees["vartree"].dbapi
6577                         for arg in args:
6578                                 for atom in arg.set:
6579                                         self._dep_stack.append(
6580                                                 Dependency(atom=atom, root=root, parent=arg))
6581                         if self._ignored_deps:
6582                                 self._dep_stack.extend(self._ignored_deps)
6583                                 self._ignored_deps = []
6584                         if not self._create_graph(allow_unsatisfied=True):
6585                                 return 0
6586                         # Check the unsatisfied deps to see if any initially satisfied deps
6587                         # will become unsatisfied due to an upgrade. Initially unsatisfied
6588                         # deps are irrelevant since we only want to avoid breaking deps
6589                         # that are initially satisfied.
6590                         while self._unsatisfied_deps:
6591                                 dep = self._unsatisfied_deps.pop()
6592                                 matches = vardb.match_pkgs(dep.atom)
6593                                 if not matches:
6594                                         self._initially_unsatisfied_deps.append(dep)
6595                                         continue
6596                                 # A scheduled installation broke a deep dependency.
6597                                 # Add the installed package to the graph so that it
6598                                 # will be appropriately reported as a slot collision
6599                                 # (possibly solvable via backtracking).
6600                                 pkg = matches[-1] # highest match
6601                                 if not self._add_pkg(pkg, dep):
6602                                         return 0
6603                                 if not self._create_graph(allow_unsatisfied=True):
6604                                         return 0
6605                 return 1
6606
6607         def _pkg(self, cpv, type_name, root_config, installed=False):
6608                 """
6609                 Get a package instance from the cache, or create a new
6610                 one if necessary. Raises KeyError from aux_get if it
6611                 one if necessary. Raises PackageNotFound if aux_get
6612                 fails for some reason (package does not exist or is
6613                 """
6614                 operation = "merge"
6615                 if installed:
6616                         operation = "nomerge"
6617                 pkg = self._pkg_cache.get(
6618                         (type_name, root_config.root, cpv, operation))
6619                 if pkg is None:
6620                         tree_type = self.pkg_tree_map[type_name]
6621                         db = root_config.trees[tree_type].dbapi
6622                         db_keys = list(self._trees_orig[root_config.root][
6623                                 tree_type].dbapi._aux_cache_keys)
6624                         try:
6625                                 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
6626                         except KeyError:
6627                                 raise portage.exception.PackageNotFound(cpv)
6628                         pkg = Package(cpv=cpv, metadata=metadata,
6629                                 root_config=root_config, installed=installed)
6630                         if type_name == "ebuild":
6631                                 settings = self.pkgsettings[root_config.root]
6632                                 settings.setcpv(pkg)
6633                                 pkg.metadata["USE"] = settings["PORTAGE_USE"]
6634                                 pkg.metadata['CHOST'] = settings.get('CHOST', '')
6635                         self._pkg_cache[pkg] = pkg
6636                 return pkg
6637
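        # Illustrative usage sketch for _pkg() (assumed, not part of the
        # original code):
        #
        #     try:
        #             pkg = self._pkg(cpv, "ebuild", root_config)
        #     except portage.exception.PackageNotFound:
        #             pass  # missing or corrupt metadata for this cpv
        #
        # The "ebuild" type name is only an example; any key of
        # self.pkg_tree_map could be passed as type_name.
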
6638         def validate_blockers(self):
6639                 """Remove any blockers from the digraph that do not match any of the
6640                 packages within the graph.  If necessary, create hard deps to ensure
6641                 correct merge order such that mutually blocking packages are never
6642                 installed simultaneously."""
6643
6644                 if "--buildpkgonly" in self.myopts or \
6645                         "--nodeps" in self.myopts:
6646                         return True
6647
6648                 #if "deep" in self.myparams:
6649                 if True:
6650                         # Pull in blockers from all installed packages that haven't already
6651                         # been pulled into the depgraph.  This is not enabled by default
6652                         # due to the performance penalty that is incurred by all the
6653                         # additional dep_check calls that are required.
6654
6655                         dep_keys = ["DEPEND","RDEPEND","PDEPEND"]
6656                         for myroot in self.trees:
6657                                 vardb = self.trees[myroot]["vartree"].dbapi
6658                                 portdb = self.trees[myroot]["porttree"].dbapi
6659                                 pkgsettings = self.pkgsettings[myroot]
6660                                 final_db = self.mydbapi[myroot]
6661
6662                                 blocker_cache = BlockerCache(myroot, vardb)
6663                                 stale_cache = set(blocker_cache)
6664                                 for pkg in vardb:
6665                                         cpv = pkg.cpv
6666                                         stale_cache.discard(cpv)
6667                                         pkg_in_graph = self.digraph.contains(pkg)
6668
6669                                         # Check for masked installed packages. Only warn about
6670                                         # packages that are in the graph in order to avoid warning
6671                                         # about those that will be automatically uninstalled during
6672                                         # the merge process or by --depclean.
6673                                         if pkg in final_db:
6674                                                 if pkg_in_graph and not visible(pkgsettings, pkg):
6675                                                         self._masked_installed.add(pkg)
6676
6677                                         blocker_atoms = None
6678                                         blockers = None
6679                                         if pkg_in_graph:
6680                                                 blockers = []
6681                                                 try:
6682                                                         blockers.extend(
6683                                                                 self._blocker_parents.child_nodes(pkg))
6684                                                 except KeyError:
6685                                                         pass
6686                                                 try:
6687                                                         blockers.extend(
6688                                                                 self._irrelevant_blockers.child_nodes(pkg))
6689                                                 except KeyError:
6690                                                         pass
6691                                         if blockers is not None:
6692                                                 blockers = set(str(blocker.atom) \
6693                                                         for blocker in blockers)
6694
6695                                         # If this node has any blockers, create a "nomerge"
6696                                         # node for it so that they can be enforced.
6697                                         self.spinner.update()
6698                                         blocker_data = blocker_cache.get(cpv)
6699                                         if blocker_data is not None and \
6700                                                 blocker_data.counter != long(pkg.metadata["COUNTER"]):
6701                                                 blocker_data = None
6702
6703                                         # If blocker data from the graph is available, use
6704                                         # it to validate the cache and update the cache if
6705                                         # it seems invalid.
6706                                         if blocker_data is not None and \
6707                                                 blockers is not None:
6708                                                 if not blockers.symmetric_difference(
6709                                                         blocker_data.atoms):
6710                                                         continue
6711                                                 blocker_data = None
6712
6713                                         if blocker_data is None and \
6714                                                 blockers is not None:
6715                                                 # Re-use the blockers from the graph.
6716                                                 blocker_atoms = sorted(blockers)
6717                                                 counter = long(pkg.metadata["COUNTER"])
6718                                                 blocker_data = \
6719                                                         blocker_cache.BlockerData(counter, blocker_atoms)
6720                                                 blocker_cache[pkg.cpv] = blocker_data
6721                                                 continue
6722
6723                                         if blocker_data:
6724                                                 blocker_atoms = blocker_data.atoms
6725                                         else:
6726                                                 # Use aux_get() to trigger FakeVartree global
6727                                                 # updates on *DEPEND when appropriate.
6728                                                 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
6729                                                 # It is crucial to pass in final_db here in order to
6730                                                 # optimize dep_check calls by eliminating atoms via
6731                                                 # dep_wordreduce and dep_eval calls.
6732                                                 try:
6733                                                         portage.dep._dep_check_strict = False
6734                                                         try:
6735                                                                 success, atoms = portage.dep_check(depstr,
6736                                                                         final_db, pkgsettings, myuse=pkg.use.enabled,
6737                                                                         trees=self._graph_trees, myroot=myroot)
6738                                                         except Exception, e:
6739                                                                 if isinstance(e, SystemExit):
6740                                                                         raise
6741                                                                 # This is helpful, for example, if a ValueError
6742                                                                 # is thrown from cpv_expand due to multiple
6743                                                                 # matches (this can happen if an atom lacks a
6744                                                                 # category).
6745                                                                 show_invalid_depstring_notice(
6746                                                                         pkg, depstr, str(e))
6747                                                                 del e
6748                                                                 raise
6749                                                 finally:
6750                                                         portage.dep._dep_check_strict = True
6751                                                 if not success:
6752                                                         replacement_pkg = final_db.match_pkgs(pkg.slot_atom)
6753                                                         if replacement_pkg and \
6754                                                                 replacement_pkg[0].operation == "merge":
6755                                                                 # This package is being replaced anyway, so
6756                                                                 # ignore invalid dependencies so as not to
6757                                                                 # annoy the user too much (otherwise they'd be
6758                                                                 # forced to manually unmerge it first).
6759                                                                 continue
6760                                                         show_invalid_depstring_notice(pkg, depstr, atoms)
6761                                                         return False
6762                                                 blocker_atoms = [myatom for myatom in atoms \
6763                                                         if myatom.startswith("!")]
6764                                                 blocker_atoms.sort()
6765                                                 counter = long(pkg.metadata["COUNTER"])
6766                                                 blocker_cache[cpv] = \
6767                                                         blocker_cache.BlockerData(counter, blocker_atoms)
6768                                         if blocker_atoms:
6769                                                 try:
6770                                                         for atom in blocker_atoms:
6771                                                                 blocker = Blocker(atom=portage.dep.Atom(atom),
6772                                                                         eapi=pkg.metadata["EAPI"], root=myroot)
6773                                                                 self._blocker_parents.add(blocker, pkg)
6774                                                 except portage.exception.InvalidAtom, e:
6775                                                         depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
6776                                                         show_invalid_depstring_notice(
6777                                                                 pkg, depstr, "Invalid Atom: %s" % (e,))
6778                                                         return False
6779                                 for cpv in stale_cache:
6780                                         del blocker_cache[cpv]
6781                                 blocker_cache.flush()
6782                                 del blocker_cache
6783
6784                 # Discard any "uninstall" tasks scheduled by previous calls
6785                 # to this method, since those tasks may not make sense given
6786                 # the current graph state.
6787                 previous_uninstall_tasks = self._blocker_uninstalls.leaf_nodes()
6788                 if previous_uninstall_tasks:
6789                         self._blocker_uninstalls = digraph()
6790                         self.digraph.difference_update(previous_uninstall_tasks)
6791
6792                 for blocker in self._blocker_parents.leaf_nodes():
6793                         self.spinner.update()
6794                         root_config = self.roots[blocker.root]
6795                         virtuals = root_config.settings.getvirtuals()
6796                         myroot = blocker.root
6797                         initial_db = self.trees[myroot]["vartree"].dbapi
6798                         final_db = self.mydbapi[myroot]
6799
6800                         provider_virtual = False
6801                         if blocker.cp in virtuals and \
6802                                 not self._have_new_virt(blocker.root, blocker.cp):
6803                                 provider_virtual = True
6804
6805                         # Use this to check PROVIDE for each matched package
6806                         # when necessary.
6807                         atom_set = InternalPackageSet(
6808                                 initial_atoms=[blocker.atom])
6809
6810                         if provider_virtual:
6811                                 atoms = []
6812                                 for provider_entry in virtuals[blocker.cp]:
6813                                         provider_cp = \
6814                                                 portage.dep_getkey(provider_entry)
6815                                         atoms.append(blocker.atom.replace(
6816                                                 blocker.cp, provider_cp))
6817                         else:
6818                                 atoms = [blocker.atom]
6819
6820                         blocked_initial = set()
6821                         for atom in atoms:
6822                                 for pkg in initial_db.match_pkgs(atom):
6823                                         if atom_set.findAtomForPackage(pkg):
6824                                                 blocked_initial.add(pkg)
6825
6826                         blocked_final = set()
6827                         for atom in atoms:
6828                                 for pkg in final_db.match_pkgs(atom):
6829                                         if atom_set.findAtomForPackage(pkg):
6830                                                 blocked_final.add(pkg)
6831
6832                         if not blocked_initial and not blocked_final:
6833                                 parent_pkgs = self._blocker_parents.parent_nodes(blocker)
6834                                 self._blocker_parents.remove(blocker)
6835                                 # Discard any parents that don't have any more blockers.
6836                                 for pkg in parent_pkgs:
6837                                         self._irrelevant_blockers.add(blocker, pkg)
6838                                         if not self._blocker_parents.child_nodes(pkg):
6839                                                 self._blocker_parents.remove(pkg)
6840                                 continue
6841                         for parent in self._blocker_parents.parent_nodes(blocker):
6842                                 unresolved_blocks = False
6843                                 depends_on_order = set()
6844                                 for pkg in blocked_initial:
6845                                         if pkg.slot_atom == parent.slot_atom:
6846                                                 # TODO: Support blocks within slots in cases where it
6847                                                 # might make sense.  For example, a new version might
6848                                                 # require that the old version be uninstalled at build
6849                                                 # time.
6850                                                 continue
6851                                         if parent.installed:
6852                                                 # Two currently installed packages conflict with
6853                                                 # each other. Ignore this case since the damage
6854                                                 # is already done and this would be likely to
6855                                                 # confuse users if displayed like a normal blocker.
6856                                                 continue
6857
6858                                         self._blocked_pkgs.add(pkg, blocker)
6859
6860                                         if parent.operation == "merge":
6861                                                 # Maybe the blocked package can be replaced or simply
6862                                                 # unmerged to resolve this block.
6863                                                 depends_on_order.add((pkg, parent))
6864                                                 continue
6865                                         # None of the above blocker resolution techniques apply,
6866                                         # so apparently this one is unresolvable.
6867                                         unresolved_blocks = True
6868                                 for pkg in blocked_final:
6869                                         if pkg.slot_atom == parent.slot_atom:
6870                                                 # TODO: Support blocks within slots.
6871                                                 continue
6872                                         if parent.operation == "nomerge" and \
6873                                                 pkg.operation == "nomerge":
6874                                                 # This blocker will be handled the next time that a
6875                                                 # merge of either package is triggered.
6876                                                 continue
6877
6878                                         self._blocked_pkgs.add(pkg, blocker)
6879
6880                                         # Maybe the blocking package can be
6881                                         # unmerged to resolve this block.
6882                                         if parent.operation == "merge" and pkg.installed:
6883                                                 depends_on_order.add((pkg, parent))
6884                                                 continue
6885                                         elif parent.operation == "nomerge":
6886                                                 depends_on_order.add((parent, pkg))
6887                                                 continue
6888                                         # None of the above blocker resolution techniques apply,
6889                                         # so apparently this one is unresolvable.
6890                                         unresolved_blocks = True
6891
6892                                 # Make sure we don't unmerge any packages that have been pulled
6893                                 # into the graph.
6894                                 if not unresolved_blocks and depends_on_order:
6895                                         for inst_pkg, inst_task in depends_on_order:
6896                                                 if self.digraph.contains(inst_pkg) and \
6897                                                         self.digraph.parent_nodes(inst_pkg):
6898                                                         unresolved_blocks = True
6899                                                         break
6900
6901                                 if not unresolved_blocks and depends_on_order:
6902                                         for inst_pkg, inst_task in depends_on_order:
6903                                                 uninst_task = Package(built=inst_pkg.built,
6904                                                         cpv=inst_pkg.cpv, installed=inst_pkg.installed,
6905                                                         metadata=inst_pkg.metadata,
6906                                                         operation="uninstall",
6907                                                         root_config=inst_pkg.root_config,
6908                                                         type_name=inst_pkg.type_name)
6909                                                 self._pkg_cache[uninst_task] = uninst_task
6910                                                 # Enforce correct merge order with a hard dep.
6911                                                 self.digraph.addnode(uninst_task, inst_task,
6912                                                         priority=BlockerDepPriority.instance)
6913                                                 # Count references to this blocker so that it can be
6914                                                 # invalidated after nodes referencing it have been
6915                                                 # merged.
6916                                                 self._blocker_uninstalls.addnode(uninst_task, blocker)
6917                                 if not unresolved_blocks and not depends_on_order:
6918                                         self._irrelevant_blockers.add(blocker, parent)
6919                                         self._blocker_parents.remove_edge(blocker, parent)
6920                                         if not self._blocker_parents.parent_nodes(blocker):
6921                                                 self._blocker_parents.remove(blocker)
6922                                         if not self._blocker_parents.child_nodes(parent):
6923                                                 self._blocker_parents.remove(parent)
6924                                 if unresolved_blocks:
6925                                         self._unsolvable_blockers.add(blocker, parent)
6926
6927                 return True
6928
6929         def _accept_blocker_conflicts(self):
6930                 acceptable = False
6931                 for x in ("--buildpkgonly", "--fetchonly",
6932                         "--fetch-all-uri", "--nodeps"):
6933                         if x in self.myopts:
6934                                 acceptable = True
6935                                 break
6936                 return acceptable
6937
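        # Note (illustrative only, behavior unchanged): _accept_blocker_conflicts()
        # above is equivalent to the one-liner
        #
        #     return any(x in self.myopts for x in ("--buildpkgonly",
        #             "--fetchonly", "--fetch-all-uri", "--nodeps"))
        #
        # assuming the any() builtin is available (Python >= 2.5); the explicit
        # loop form matches the style used elsewhere in this module.
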
6938         def _merge_order_bias(self, mygraph):
6939                 """
6940                 For optimal leaf node selection, promote deep system runtime deps and
6941                 order nodes from highest to lowest overall reference count.
6942                 """
6943
6944                 node_info = {}
6945                 for node in mygraph.order:
6946                         node_info[node] = len(mygraph.parent_nodes(node))
6947                 deep_system_deps = _find_deep_system_runtime_deps(mygraph)
6948
6949                 def cmp_merge_preference(node1, node2):
6950
6951                         if node1.operation == 'uninstall':
6952                                 if node2.operation == 'uninstall':
6953                                         return 0
6954                                 return 1
6955
6956                         if node2.operation == 'uninstall':
6957                                 if node1.operation == 'uninstall':
6958                                         return 0
6959                                 return -1
6960
6961                         node1_sys = node1 in deep_system_deps
6962                         node2_sys = node2 in deep_system_deps
6963                         if node1_sys != node2_sys:
6964                                 if node1_sys:
6965                                         return -1
6966                                 return 1
6967
6968                         return node_info[node2] - node_info[node1]
6969
6970                 mygraph.order.sort(key=cmp_sort_key(cmp_merge_preference))
6971
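        # Illustrative note (assumed, not part of the original code):
        # cmp_sort_key() wraps a classic cmp-style function such as
        # cmp_merge_preference() above so that it can be used with
        # list.sort(key=...). For example, roughly:
        #
        #     names = ["bb", "a", "c"]
        #     names.sort(key=cmp_sort_key(lambda x, y: len(x) - len(y)))
        #     # names is now ["a", "c", "bb"]
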
6972         def altlist(self, reversed=False):
6973
6974                 while self._serialized_tasks_cache is None:
6975                         self._resolve_conflicts()
6976                         try:
6977                                 self._serialized_tasks_cache, self._scheduler_graph = \
6978                                         self._serialize_tasks()
6979                         except self._serialize_tasks_retry:
6980                                 pass
6981
6982                 retlist = self._serialized_tasks_cache[:]
6983                 if reversed:
6984                         retlist.reverse()
6985                 return retlist
6986
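        # Illustrative usage sketch for altlist() (assumed, not part of the
        # original code):
        #
        #     mergelist = depgraph.altlist()
        #     for task in mergelist:
        #             pass  # tasks in merge order; altlist(reversed=True)
        #                   # returns the same list reversed
        #
        # "depgraph" and "task" are placeholder names for an instance of this
        # class and the tasks it serializes.
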
6987         def schedulerGraph(self):
6988                 """
6989                 The scheduler graph is identical to the normal one except that
6990                 uninstall edges are reversed in specific cases that require
6991                 conflicting packages to be temporarily installed simultaneously.
6992                 This is intended for use by the Scheduler in its parallelization
6993                 logic. It ensures that temporary simultaneous installation of
6994                 conflicting packages is avoided when appropriate (especially for
6995                 !!atom blockers), but allowed in specific cases that require it.
6996
6997                 Note that this method calls break_refs() which alters the state of
6998                 internal Package instances such that this depgraph instance should
6999                 not be used to perform any more calculations.
7000                 """
7001                 if self._scheduler_graph is None:
7002                         self.altlist()
7003                 self.break_refs(self._scheduler_graph.order)
7004                 return self._scheduler_graph
7005
7006         def break_refs(self, nodes):
7007                 """
7008                 Take a mergelist like that returned from self.altlist() and
7009                 break any references that lead back to the depgraph. This is
7010                 useful if you want to hold references to packages without
7011                 also holding the depgraph on the heap.
7012                 """
7013                 for node in nodes:
7014                         if hasattr(node, "root_config"):
7015                                 # The FakeVartree references the _package_cache which
7016                                 # references the depgraph. So that Package instances don't
7017                                 # hold the depgraph and FakeVartree on the heap, replace
7018                                 # the RootConfig that references the FakeVartree with the
7019                                 # original RootConfig instance which references the actual
7020                                 # vartree.
7021                                 node.root_config = \
7022                                         self._trees_orig[node.root_config.root]["root_config"]
7023
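        # Illustrative caller-side sketch (assumed, not part of the original
        # code). After break_refs() has run, this depgraph should not be used
        # for further calculations (see the schedulerGraph() docstring above):
        #
        #     sched_graph = depgraph.schedulerGraph()
        #     for node in sched_graph.order:
        #             pass  # node.root_config now references the original
        #                   # RootConfig instead of the FakeVartree's copy
        #
        # "depgraph" is a placeholder name for an instance of this class.
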
7024         def _resolve_conflicts(self):
7025                 if not self._complete_graph():
7026                         raise self._unknown_internal_error()
7027
7028                 if not self.validate_blockers():
7029                         raise self._unknown_internal_error()
7030
7031                 if self._slot_collision_info:
7032                         self._process_slot_conflicts()
7033
7034         def _serialize_tasks(self):
7035
7036                 if "--debug" in self.myopts:
7037                         writemsg("\ndigraph:\n\n", noiselevel=-1)
7038                         self.digraph.debug_print()
7039                         writemsg("\n", noiselevel=-1)
7040
7041                 scheduler_graph = self.digraph.copy()
7042
7043                 if '--nodeps' in self.myopts:
7044                         # Preserve the package order given on the command line.
7045                         return ([node for node in scheduler_graph \
7046                                 if isinstance(node, Package) \
7047                                 and node.operation == 'merge'], scheduler_graph)
7048
7049                 mygraph=self.digraph.copy()
7050                 # Prune "nomerge" root nodes if nothing depends on them, since
7051                 # otherwise they slow down merge order calculation. Don't remove
7052                 # non-root nodes since they help optimize merge order in some cases
7053                 # such as revdep-rebuild.
7054                 removed_nodes = set()
7055                 while True:
7056                         for node in mygraph.root_nodes():
7057                                 if not isinstance(node, Package) or \
7058                                         node.installed or node.onlydeps:
7059                                         removed_nodes.add(node)
7060                         if removed_nodes:
7061                                 self.spinner.update()
7062                                 mygraph.difference_update(removed_nodes)
7063                         if not removed_nodes:
7064                                 break
7065                         removed_nodes.clear()
7066                 self._merge_order_bias(mygraph)
7067                 def cmp_circular_bias(n1, n2):
7068                         """
7069                         RDEPEND is stronger than PDEPEND and this function
7070                         measures such a strength bias within a circular
7071                         dependency relationship.
7072                         """
7073                         n1_n2_medium = n2 in mygraph.child_nodes(n1,
7074                                 ignore_priority=priority_range.ignore_medium_soft)
7075                         n2_n1_medium = n1 in mygraph.child_nodes(n2,
7076                                 ignore_priority=priority_range.ignore_medium_soft)
7077                         if n1_n2_medium == n2_n1_medium:
7078                                 return 0
7079                         elif n1_n2_medium:
7080                                 return 1
7081                         return -1
7082                 myblocker_uninstalls = self._blocker_uninstalls.copy()
7083                 retlist=[]
7084                 # Contains uninstall tasks that have been scheduled to
7085                 # occur after overlapping blockers have been installed.
7086                 scheduled_uninstalls = set()
7087                 # Contains any Uninstall tasks that have been ignored
7088                 # in order to avoid the circular deps code path. These
7089                 # correspond to blocker conflicts that could not be
7090                 # resolved.
7091                 ignored_uninstall_tasks = set()
7092                 have_uninstall_task = False
7093                 complete = "complete" in self.myparams
7094                 asap_nodes = []
7095
7096                 def get_nodes(**kwargs):
7097                         """
7098                         Returns leaf nodes excluding Uninstall instances
7099                         since those should be executed as late as possible.
7100                         """
7101                         return [node for node in mygraph.leaf_nodes(**kwargs) \
7102                                 if isinstance(node, Package) and \
7103                                         (node.operation != "uninstall" or \
7104                                         node in scheduled_uninstalls)]
7105
7106                 # sys-apps/portage needs special treatment if ROOT="/"
7107                 running_root = self._running_root.root
7108                 from portage.const import PORTAGE_PACKAGE_ATOM
7109                 runtime_deps = InternalPackageSet(
7110                         initial_atoms=[PORTAGE_PACKAGE_ATOM])
7111                 running_portage = self.trees[running_root]["vartree"].dbapi.match_pkgs(
7112                         PORTAGE_PACKAGE_ATOM)
7113                 replacement_portage = self.mydbapi[running_root].match_pkgs(
7114                         PORTAGE_PACKAGE_ATOM)
7115
7116                 if running_portage:
7117                         running_portage = running_portage[0]
7118                 else:
7119                         running_portage = None
7120
7121                 if replacement_portage:
7122                         replacement_portage = replacement_portage[0]
7123                 else:
7124                         replacement_portage = None
7125
7126                 if replacement_portage == running_portage:
7127                         replacement_portage = None
7128
7129                 if replacement_portage is not None:
7130                         # update from running_portage to replacement_portage asap
7131                         asap_nodes.append(replacement_portage)
7132
7133                 if running_portage is not None:
7134                         try:
7135                                 portage_rdepend = self._select_atoms_highest_available(
7136                                         running_root, running_portage.metadata["RDEPEND"],
7137                                         myuse=running_portage.use.enabled,
7138                                         parent=running_portage, strict=False)
7139                         except portage.exception.InvalidDependString, e:
7140                                 portage.writemsg("!!! Invalid RDEPEND in " + \
7141                                         "'%svar/db/pkg/%s/RDEPEND': %s\n" % \
7142                                         (running_root, running_portage.cpv, e), noiselevel=-1)
7143                                 del e
7144                                 portage_rdepend = []
7145                         runtime_deps.update(atom for atom in portage_rdepend \
7146                                 if not atom.startswith("!"))
7147
7148                 def gather_deps(ignore_priority, mergeable_nodes,
7149                         selected_nodes, node):
7150                         """
7151                         Recursively gather a group of nodes that RDEPEND on
7152                         each other. This ensures that they are merged as a group
7153                         and get their RDEPENDs satisfied as soon as possible.
7154                         """
7155                         if node in selected_nodes:
7156                                 return True
7157                         if node not in mergeable_nodes:
7158                                 return False
7159                         if node == replacement_portage and \
7160                                 mygraph.child_nodes(node,
7161                                 ignore_priority=priority_range.ignore_medium_soft):
7162                                 # Make sure that portage always has all of its
7163                                 # RDEPENDs installed first.
7164                                 return False
7165                         selected_nodes.add(node)
7166                         for child in mygraph.child_nodes(node,
7167                                 ignore_priority=ignore_priority):
7168                                 if not gather_deps(ignore_priority,
7169                                         mergeable_nodes, selected_nodes, child):
7170                                         return False
7171                         return True
7172
7173                 def ignore_uninst_or_med(priority):
7174                         if priority is BlockerDepPriority.instance:
7175                                 return True
7176                         return priority_range.ignore_medium(priority)
7177
7178                 def ignore_uninst_or_med_soft(priority):
7179                         if priority is BlockerDepPriority.instance:
7180                                 return True
7181                         return priority_range.ignore_medium_soft(priority)
7182
7183                 tree_mode = "--tree" in self.myopts
7184                 # Tracks whether or not the current iteration should prefer asap_nodes
7185                 # if available.  This is set to False when the previous iteration
7186                 # failed to select any nodes.  It is reset whenever nodes are
7187                 # successfully selected.
7188                 prefer_asap = True
7189
7190                 # Controls whether or not the current iteration should drop edges that
7191                 # are "satisfied" by installed packages, in order to solve circular
7192                 # dependencies. The deep runtime dependencies of installed packages are
7193                 # not checked in this case (bug #199856), so it must be avoided
7194                 # whenever possible.
7195                 drop_satisfied = False
7196
7197                 # State of variables for successive iterations that loosen the
7198                 # criteria for node selection.
7199                 #
7200                 # iteration   prefer_asap   drop_satisfied
7201                 # 1           True          False
7202                 # 2           False         False
7203                 # 3           False         True
7204                 #
7205                 # If no nodes are selected on the last iteration, it is due to
7206                 # unresolved blockers or circular dependencies.
7207
7208                 while not mygraph.empty():
7209                         self.spinner.update()
7210                         selected_nodes = None
7211                         ignore_priority = None
7212                         if drop_satisfied or (prefer_asap and asap_nodes):
7213                                 priority_range = DepPrioritySatisfiedRange
7214                         else:
7215                                 priority_range = DepPriorityNormalRange
7216                         if prefer_asap and asap_nodes:
7217                                 # ASAP nodes are merged before their soft deps. Go ahead and
7218                                 # select root nodes here if necessary, since it's typical for
7219                                 # the parent to have been removed from the graph already.
7220                                 asap_nodes = [node for node in asap_nodes \
7221                                         if mygraph.contains(node)]
7222                                 for node in asap_nodes:
7223                                         if not mygraph.child_nodes(node,
7224                                                 ignore_priority=priority_range.ignore_soft):
7225                                                 selected_nodes = [node]
7226                                                 asap_nodes.remove(node)
7227                                                 break
7228                         if not selected_nodes and \
7229                                 not (prefer_asap and asap_nodes):
7230                                 for i in xrange(priority_range.NONE,
7231                                         priority_range.MEDIUM_SOFT + 1):
7232                                         ignore_priority = priority_range.ignore_priority[i]
7233                                         nodes = get_nodes(ignore_priority=ignore_priority)
7234                                         if nodes:
7235                                                 # If there is a mix of uninstall nodes with other
7236                                                 # types, save the uninstall nodes for later since
7237                                                 # sometimes a merge node will render an uninstall
7238                                                 # node unnecessary (due to occupying the same slot),
7239                                                 # and we want to avoid executing a separate uninstall
7240                                                 # task in that case.
7241                                                 if len(nodes) > 1:
7242                                                         good_uninstalls = []
7243                                                         with_some_uninstalls_excluded = []
7244                                                         for node in nodes:
7245                                                                 if node.operation == "uninstall":
7246                                                                         slot_node = self.mydbapi[node.root
7247                                                                                 ].match_pkgs(node.slot_atom)
7248                                                                         if slot_node and \
7249                                                                                 slot_node[0].operation == "merge":
7250                                                                                 continue
7251                                                                         good_uninstalls.append(node)
7252                                                                 with_some_uninstalls_excluded.append(node)
7253                                                         if good_uninstalls:
7254                                                                 nodes = good_uninstalls
7255                                                         elif with_some_uninstalls_excluded:
7256                                                                 nodes = with_some_uninstalls_excluded
7257                                                         else:
7258                                                                 nodes = nodes
7259
7260                                                 if ignore_priority is None and not tree_mode:
7261                                                         # Greedily pop all of these nodes since no
7262                                                         # relationship has been ignored. This optimization
7263                                                         # destroys --tree output, so it's disabled in tree
7264                                                         # mode.
7265                                                         selected_nodes = nodes
7266                                                 else:
7267                                                         # For optimal merge order:
7268                                                         #  * Only pop one node.
7269                                                         #  * Removing a root node (node without a parent)
7270                                                         #    will not produce a leaf node, so avoid it.
7271                                                         #  * It's normal for a selected uninstall to be a
7272                                                         #    root node, so don't check them for parents.
7273                                                         for node in nodes:
7274                                                                 if node.operation == "uninstall" or \
7275                                                                         mygraph.parent_nodes(node):
7276                                                                         selected_nodes = [node]
7277                                                                         break
7278
7279                                                 if selected_nodes:
7280                                                         break
7281
7282                         if not selected_nodes:
7283                                 nodes = get_nodes(ignore_priority=priority_range.ignore_medium)
7284                                 if nodes:
7285                                         mergeable_nodes = set(nodes)
7286                                         if prefer_asap and asap_nodes:
7287                                                 nodes = asap_nodes
7288                                         for i in xrange(priority_range.SOFT,
7289                                                 priority_range.MEDIUM_SOFT + 1):
7290                                                 ignore_priority = priority_range.ignore_priority[i]
7291                                                 for node in nodes:
7292                                                         if not mygraph.parent_nodes(node):
7293                                                                 continue
7294                                                         selected_nodes = set()
7295                                                         if gather_deps(ignore_priority,
7296                                                                 mergeable_nodes, selected_nodes, node):
7297                                                                 break
7298                                                         else:
7299                                                                 selected_nodes = None
7300                                                 if selected_nodes:
7301                                                         break
7302
7303                                         if prefer_asap and asap_nodes and not selected_nodes:
7304                                                 # We failed to find any asap nodes to merge, so ignore
7305                                                 # them for the next iteration.
7306                                                 prefer_asap = False
7307                                                 continue
7308
7309                         if selected_nodes and ignore_priority is not None:
7310                                 # Try to merge ignored medium_soft deps as soon as possible
7311                                 # if they're not satisfied by installed packages.
7312                                 for node in selected_nodes:
7313                                         children = set(mygraph.child_nodes(node))
7314                                         soft = children.difference(
7315                                                 mygraph.child_nodes(node,
7316                                                 ignore_priority=DepPrioritySatisfiedRange.ignore_soft))
7317                                         medium_soft = children.difference(
7318                                                 mygraph.child_nodes(node,
7319                                                         ignore_priority = \
7320                                                         DepPrioritySatisfiedRange.ignore_medium_soft))
7321                                         medium_soft.difference_update(soft)
7322                                         for child in medium_soft:
7323                                                 if child in selected_nodes:
7324                                                         continue
7325                                                 if child in asap_nodes:
7326                                                         continue
7327                                                 asap_nodes.append(child)
7328
7329                         if selected_nodes and len(selected_nodes) > 1:
7330                                 if not isinstance(selected_nodes, list):
7331                                         selected_nodes = list(selected_nodes)
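                                         # A whole group was selected at once (typically from a
                                         # dependency cycle), so order it with cmp_circular_bias,
                                         # which presumably biases the order within the cycle so
                                         # that stronger (runtime) deps are honored before weaker
                                         # PDEPEND-style deps.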
7332                                 selected_nodes.sort(key=cmp_sort_key(cmp_circular_bias))
7333
7334                         if not selected_nodes and not myblocker_uninstalls.is_empty():
7335                                 # An Uninstall task needs to be executed in order to
7336                                 # avoid a conflict, if possible.
7337
7338                                 if drop_satisfied:
7339                                         priority_range = DepPrioritySatisfiedRange
7340                                 else:
7341                                         priority_range = DepPriorityNormalRange
7342
7343                                 mergeable_nodes = get_nodes(
7344                                         ignore_priority=ignore_uninst_or_med)
7345
7346                                 min_parent_deps = None
7347                                 uninst_task = None
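                                 # Among the eligible uninstall tasks, pick the one whose
                                 # parents have the fewest outstanding dependencies, since
                                 # executing it is most likely to expose a new leaf node.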
7348                                 for task in myblocker_uninstalls.leaf_nodes():
7349                                         # Do some sanity checks so that system or world packages
7350                                         # don't get uninstalled inappropriately here (only really
7351                                         # necessary when --complete-graph has not been enabled).
7352
7353                                         if task in ignored_uninstall_tasks:
7354                                                 continue
7355
7356                                         if task in scheduled_uninstalls:
7357                                                 # It's been scheduled but it hasn't
7358                                                 # been executed yet due to dependence
7359                                                 # on installation of blocking packages.
7360                                                 continue
7361
7362                                         root_config = self.roots[task.root]
7363                                         inst_pkg = self._pkg_cache[
7364                                                 ("installed", task.root, task.cpv, "nomerge")]
7365
7366                                         if self.digraph.contains(inst_pkg):
7367                                                 continue
7368
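                                         # Inspect the blockers that target this package.  A "!!"
                                         # blocker (EAPI 2 or later) explicitly forbids temporary
                                         # overlap with the blocking package, while for EAPI 0/1
                                         # blockers overlap problems can only be guessed at
                                         # heuristically.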
7369                                         forbid_overlap = False
7370                                         heuristic_overlap = False
7371                                         for blocker in myblocker_uninstalls.parent_nodes(task):
7372                                                 if blocker.eapi in ("0", "1"):
7373                                                         heuristic_overlap = True
7374                                                 elif blocker.atom.blocker.overlap.forbid:
7375                                                         forbid_overlap = True
7376                                                         break
7377                                         if forbid_overlap and running_root == task.root:
7378                                                 continue
7379
7380                                         if heuristic_overlap and running_root == task.root:
7381                                                 # Never uninstall sys-apps/portage or its essential
7382                                                 # dependencies, except through replacement.
7383                                                 try:
7384                                                         runtime_dep_atoms = \
7385                                                                 list(runtime_deps.iterAtomsForPackage(task))
7386                                                 except portage.exception.InvalidDependString, e:
7387                                                         portage.writemsg("!!! Invalid PROVIDE in " + \
7388                                                                 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
7389                                                                 (task.root, task.cpv, e), noiselevel=-1)
7390                                                         del e
7391                                                         continue
7392
7393                                                 # Don't uninstall a runtime dep if it appears
7394                                                 # to be the only suitable one installed.
7395                                                 skip = False
7396                                                 vardb = root_config.trees["vartree"].dbapi
7397                                                 for atom in runtime_dep_atoms:
7398                                                         other_version = None
7399                                                         for pkg in vardb.match_pkgs(atom):
7400                                                                 if pkg.cpv == task.cpv and \
7401                                                                         pkg.metadata["COUNTER"] == \
7402                                                                         task.metadata["COUNTER"]:
7403                                                                         continue
7404                                                                 other_version = pkg
7405                                                                 break
7406                                                         if other_version is None:
7407                                                                 skip = True
7408                                                                 break
7409                                                 if skip:
7410                                                         continue
7411
7412                                                 # For packages in the system set, don't take
7413                                                 # any chances. If the conflict can't be resolved
7414                                                 # by a normal replacement operation then abort.
7415                                                 skip = False
7416                                                 try:
7417                                                         for atom in root_config.sets[
7418                                                                 "system"].iterAtomsForPackage(task):
7419                                                                 skip = True
7420                                                                 break
7421                                                 except portage.exception.InvalidDependString, e:
7422                                                         portage.writemsg("!!! Invalid PROVIDE in " + \
7423                                                                 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
7424                                                                 (task.root, task.cpv, e), noiselevel=-1)
7425                                                         del e
7426                                                         skip = True
7427                                                 if skip:
7428                                                         continue
7429
7430                                         # Note that the world check isn't always
7431                                         # necessary since self._complete_graph() will
7432                                         # add all packages from the system and world sets to the
7433                                         # graph. This just allows unresolved conflicts to be
7434                                         # detected as early as possible, which makes it possible
7435                                         # to avoid calling self._complete_graph() when it is
7436                                         # unnecessary due to blockers triggering an abort.
7437                                         if not complete:
7438                                                 # For packages in the world set, go ahead and uninstall
7439                                                 # when necessary, as long as the atom will be satisfied
7440                                                 # in the final state.
7441                                                 graph_db = self.mydbapi[task.root]
7442                                                 skip = False
7443                                                 try:
7444                                                         for atom in root_config.sets[
7445                                                                 "world"].iterAtomsForPackage(task):
7446                                                                 satisfied = False
7447                                                                 for pkg in graph_db.match_pkgs(atom):
7448                                                                         if pkg == inst_pkg:
7449                                                                                 continue
7450                                                                         satisfied = True
7451                                                                         break
7452                                                                 if not satisfied:
7453                                                                         skip = True
7454                                                                         self._blocked_world_pkgs[inst_pkg] = atom
7455                                                                         break
7456                                                 except portage.exception.InvalidDependString, e:
7457                                                         portage.writemsg("!!! Invalid PROVIDE in " + \
7458                                                                 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
7459                                                                 (task.root, task.cpv, e), noiselevel=-1)
7460                                                         del e
7461                                                         skip = True
7462                                                 if skip:
7463                                                         continue
7464
7465                                         # Check the deps of parent nodes to ensure that
7466                                         # the chosen task produces a leaf node. Maybe
7467                                         # this can be optimized some more to make the
7468                                         # best possible choice, but the current algorithm
7469                                         # is simple and should be near optimal for most
7470                                         # common cases.
7471                                         mergeable_parent = False
7472                                         parent_deps = set()
7473                                         for parent in mygraph.parent_nodes(task):
7474                                                 parent_deps.update(mygraph.child_nodes(parent,
7475                                                         ignore_priority=priority_range.ignore_medium_soft))
7476                                                 if parent in mergeable_nodes and \
7477                                                         gather_deps(ignore_uninst_or_med_soft,
7478                                                         mergeable_nodes, set(), parent):
7479                                                         mergeable_parent = True
7480
7481                                         if not mergeable_parent:
7482                                                 continue
7483
7484                                         parent_deps.remove(task)
7485                                         if min_parent_deps is None or \
7486                                                 len(parent_deps) < min_parent_deps:
7487                                                 min_parent_deps = len(parent_deps)
7488                                                 uninst_task = task
7489
7490                                 if uninst_task is not None:
7491                                         # The uninstall is performed only after blocking
7492                                         # packages have been merged on top of it. File
7493                                         # collisions with the blocking packages are detected, and the
7494                                         # colliding files are removed from the list of files to be uninstalled.
7495                                         scheduled_uninstalls.add(uninst_task)
7496                                         parent_nodes = mygraph.parent_nodes(uninst_task)
7497
7498                                         # Reverse the parent -> uninstall edges since we want
7499                                         # to do the uninstall after blocking packages have
7500                                         # been merged on top of it.
7501                                         mygraph.remove(uninst_task)
7502                                         for blocked_pkg in parent_nodes:
7503                                                 mygraph.add(blocked_pkg, uninst_task,
7504                                                         priority=BlockerDepPriority.instance)
7505                                                 scheduler_graph.remove_edge(uninst_task, blocked_pkg)
7506                                                 scheduler_graph.add(blocked_pkg, uninst_task,
7507                                                         priority=BlockerDepPriority.instance)
7508
7509                                         # Reset the state variables for leaf node selection and
7510                                         # continue trying to select leaf nodes.
7511                                         prefer_asap = True
7512                                         drop_satisfied = False
7513                                         continue
7514
7515                         if not selected_nodes:
7516                                 # Only select root nodes as a last resort. This case should
7517                                 # only trigger when the graph is nearly empty and the only
7518                                 # remaining nodes are isolated (no parents or children). Since
7519                                 # the nodes must be isolated, ignore_priority is not needed.
7520                                 selected_nodes = get_nodes()
7521
7522                         if not selected_nodes and not drop_satisfied:
7523                                 drop_satisfied = True
7524                                 continue
7525
7526                         if not selected_nodes and not myblocker_uninstalls.is_empty():
7527                                 # If possible, drop an uninstall task here in order to avoid
7528                                 # the circular deps code path. The corresponding blocker will
7529                                 # still be counted as an unresolved conflict.
7530                                 uninst_task = None
7531                                 for node in myblocker_uninstalls.leaf_nodes():
7532                                         try:
7533                                                 mygraph.remove(node)
7534                                         except KeyError:
7535                                                 pass
7536                                         else:
7537                                                 uninst_task = node
7538                                                 ignored_uninstall_tasks.add(node)
7539                                                 break
7540
7541                                 if uninst_task is not None:
7542                                         # Reset the state variables for leaf node selection and
7543                                         # continue trying to select leaf nodes.
7544                                         prefer_asap = True
7545                                         drop_satisfied = False
7546                                         continue
7547
7548                         if not selected_nodes:
7549                                 self._circular_deps_for_display = mygraph
7550                                 raise self._unknown_internal_error()
7551
7552                         # At this point, we've succeeded in selecting one or more nodes, so
7553                         # reset state variables for leaf node selection.
7554                         prefer_asap = True
7555                         drop_satisfied = False
7556
7557                         mygraph.difference_update(selected_nodes)
7558
7559                         for node in selected_nodes:
7560                                 if isinstance(node, Package) and \
7561                                         node.operation == "nomerge":
7562                                         continue
7563
7564                                 # Handle interactions between blockers
7565                                 # and uninstallation tasks.
7566                                 solved_blockers = set()
7567                                 uninst_task = None
7568                                 if isinstance(node, Package) and \
7569                                         "uninstall" == node.operation:
7570                                         have_uninstall_task = True
7571                                         uninst_task = node
7572                                 else:
7573                                         vardb = self.trees[node.root]["vartree"].dbapi
7574                                         previous_cpv = vardb.match(node.slot_atom)
7575                                         if previous_cpv:
7576                                                 # The package will be replaced by this one, so remove
7577                                                 # the corresponding Uninstall task if necessary.
7578                                                 previous_cpv = previous_cpv[0]
7579                                                 uninst_task = \
7580                                                         ("installed", node.root, previous_cpv, "uninstall")
7581                                                 try:
7582                                                         mygraph.remove(uninst_task)
7583                                                 except KeyError:
7584                                                         pass
7585
7586                                 if uninst_task is not None and \
7587                                         uninst_task not in ignored_uninstall_tasks and \
7588                                         myblocker_uninstalls.contains(uninst_task):
7589                                         blocker_nodes = myblocker_uninstalls.parent_nodes(uninst_task)
7590                                         myblocker_uninstalls.remove(uninst_task)
7591                                         # Discard any blockers that this Uninstall solves.
7592                                         for blocker in blocker_nodes:
7593                                                 if not myblocker_uninstalls.child_nodes(blocker):
7594                                                         myblocker_uninstalls.remove(blocker)
7595                                                         solved_blockers.add(blocker)
7596
7597                                 retlist.append(node)
7598
7599                                 if (isinstance(node, Package) and \
7600                                         "uninstall" == node.operation) or \
7601                                         (uninst_task is not None and \
7602                                         uninst_task in scheduled_uninstalls):
7603                                         # Include satisfied blockers in the merge list
7604                                         # since the user might be interested and also
7605                                         # it serves as an indicator that blocking packages
7606                                         # will be temporarily installed simultaneously.
7607                                         for blocker in solved_blockers:
7608                                                 retlist.append(Blocker(atom=blocker.atom,
7609                                                         root=blocker.root, eapi=blocker.eapi,
7610                                                         satisfied=True))
7611
7612                 unsolvable_blockers = set(self._unsolvable_blockers.leaf_nodes())
7613                 for node in myblocker_uninstalls.root_nodes():
7614                         unsolvable_blockers.add(node)
7615
7616                 for blocker in unsolvable_blockers:
7617                         retlist.append(blocker)
7618
7619                 # If any Uninstall tasks need to be executed in order
7620                 # to avoid a conflict, complete the graph with any
7621                 # dependencies that may have been initially
7622                 # neglected (to ensure that unsafe Uninstall tasks
7623                 # are properly identified and blocked from execution).
7624                 if have_uninstall_task and \
7625                         not complete and \
7626                         not unsolvable_blockers:
7627                         self.myparams.add("complete")
7628                         raise self._serialize_tasks_retry("")
7629
7630                 if unsolvable_blockers and \
7631                         not self._accept_blocker_conflicts():
7632                         self._unsatisfied_blockers_for_display = unsolvable_blockers
7633                         self._serialized_tasks_cache = retlist[:]
7634                         self._scheduler_graph = scheduler_graph
7635                         raise self._unknown_internal_error()
7636
7637                 if self._slot_collision_info and \
7638                         not self._accept_blocker_conflicts():
7639                         self._serialized_tasks_cache = retlist[:]
7640                         self._scheduler_graph = scheduler_graph
7641                         raise self._unknown_internal_error()
7642
7643                 return retlist, scheduler_graph
7644
7645         def _show_circular_deps(self, mygraph):
7646                 # No leaf nodes are available, so we have a circular
7647                 # dependency panic situation.  Reduce the noise level to a
7648                 # minimum via repeated elimination of root nodes since they
7649                 # have no parents and thus cannot be part of a cycle.
7650                 while True:
7651                         root_nodes = mygraph.root_nodes(
7652                                 ignore_priority=DepPrioritySatisfiedRange.ignore_medium_soft)
7653                         if not root_nodes:
7654                                 break
7655                         mygraph.difference_update(root_nodes)
7656                 # Display the USE flags that are enabled on nodes that are part
7657                 # of dependency cycles in case that helps the user decide to
7658                 # disable some of them.
7659                 display_order = []
7660                 tempgraph = mygraph.copy()
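                    # Build a display order by repeatedly pulling out leaf nodes,
                    # falling back to an arbitrary node when the remaining cycle
                    # has no leaf, and reverse it before display.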
7661                 while not tempgraph.empty():
7662                         nodes = tempgraph.leaf_nodes()
7663                         if not nodes:
7664                                 node = tempgraph.order[0]
7665                         else:
7666                                 node = nodes[0]
7667                         display_order.append(node)
7668                         tempgraph.remove(node)
7669                 display_order.reverse()
7670                 self.myopts.pop("--quiet", None)
7671                 self.myopts.pop("--verbose", None)
7672                 self.myopts["--tree"] = True
7673                 portage.writemsg("\n\n", noiselevel=-1)
7674                 self.display(display_order)
7675                 prefix = colorize("BAD", " * ")
7676                 portage.writemsg("\n", noiselevel=-1)
7677                 portage.writemsg(prefix + "Error: circular dependencies:\n",
7678                         noiselevel=-1)
7679                 portage.writemsg("\n", noiselevel=-1)
7680                 mygraph.debug_print()
7681                 portage.writemsg("\n", noiselevel=-1)
7682                 portage.writemsg(prefix + "Note that circular dependencies " + \
7683                         "can often be avoided by temporarily\n", noiselevel=-1)
7684                 portage.writemsg(prefix + "disabling USE flags that trigger " + \
7685                         "optional dependencies.\n", noiselevel=-1)
7686
7687         def _show_merge_list(self):
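                    # Display the cached merge list unless display() has already
                    # shown this exact list (in either order).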
7688                 if self._serialized_tasks_cache is not None and \
7689                         not (self._displayed_list and \
7690                         (self._displayed_list == self._serialized_tasks_cache or \
7691                         self._displayed_list == \
7692                                 list(reversed(self._serialized_tasks_cache)))):
7693                         display_list = self._serialized_tasks_cache[:]
7694                         if "--tree" in self.myopts:
7695                                 display_list.reverse()
7696                         self.display(display_list)
7697
7698         def _show_unsatisfied_blockers(self, blockers):
7699                 self._show_merge_list()
7700                 msg = "Error: The above package list contains " + \
7701                         "packages which cannot be installed " + \
7702                         "at the same time on the same system."
7703                 prefix = colorize("BAD", " * ")
7704                 from textwrap import wrap
7705                 portage.writemsg("\n", noiselevel=-1)
7706                 for line in wrap(msg, 70):
7707                         portage.writemsg(prefix + line + "\n", noiselevel=-1)
7708
7709                 # Display the conflicting packages along with the packages
7710                 # that pulled them in. This is helpful for troubleshooting
7711                 # cases in which blockers don't solve automatically and
7712                 # the reasons are not apparent from the normal merge list
7713                 # display.
7714
7715                 conflict_pkgs = {}
7716                 for blocker in blockers:
7717                         for pkg in chain(self._blocked_pkgs.child_nodes(blocker), \
7718                                 self._blocker_parents.parent_nodes(blocker)):
7719                                 parent_atoms = self._parent_atoms.get(pkg)
7720                                 if not parent_atoms:
7721                                         atom = self._blocked_world_pkgs.get(pkg)
7722                                         if atom is not None:
7723                                                 parent_atoms = set([("@world", atom)])
7724                                 if parent_atoms:
7725                                         conflict_pkgs[pkg] = parent_atoms
7726
7727                 if conflict_pkgs:
7728                         # Reduce noise by pruning packages that are only
7729                         # pulled in by other conflict packages.
7730                         pruned_pkgs = set()
7731                         for pkg, parent_atoms in conflict_pkgs.iteritems():
7732                                 relevant_parent = False
7733                                 for parent, atom in parent_atoms:
7734                                         if parent not in conflict_pkgs:
7735                                                 relevant_parent = True
7736                                                 break
7737                                 if not relevant_parent:
7738                                         pruned_pkgs.add(pkg)
7739                         for pkg in pruned_pkgs:
7740                                 del conflict_pkgs[pkg]
7741
7742                 if conflict_pkgs:
7743                         msg = []
7744                         msg.append("\n")
7745                         indent = "  "
7746                         # Max number of parents shown, to avoid flooding the display.
7747                         max_parents = 3
7748                         for pkg, parent_atoms in conflict_pkgs.iteritems():
7749
7750                                 pruned_list = set()
7751
7752                                 # Prefer packages that are not directly involved in a conflict.
7753                                 for parent_atom in parent_atoms:
7754                                         if len(pruned_list) >= max_parents:
7755                                                 break
7756                                         parent, atom = parent_atom
7757                                         if parent not in conflict_pkgs:
7758                                                 pruned_list.add(parent_atom)
7759
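                                     # Fill any remaining slots with the other parents, which
                                     # are themselves involved in the conflict.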
7760                                 for parent_atom in parent_atoms:
7761                                         if len(pruned_list) >= max_parents:
7762                                                 break
7763                                         pruned_list.add(parent_atom)
7764
7765                                 omitted_parents = len(parent_atoms) - len(pruned_list)
7766                                 msg.append(indent + "%s pulled in by\n" % pkg)
7767
7768                                 for parent_atom in pruned_list:
7769                                         parent, atom = parent_atom
7770                                         msg.append(2*indent)
7771                                         if isinstance(parent,
7772                                                 (PackageArg, AtomArg)):
7773                                                 # For PackageArg and AtomArg types, it's
7774                                                 # redundant to display the atom attribute.
7775                                                 msg.append(str(parent))
7776                                         else:
7777                                                 # Display the specific atom from SetArg or
7778                                                 # Package types.
7779                                                 msg.append("%s required by %s" % (atom, parent))
7780                                         msg.append("\n")
7781
7782                                 if omitted_parents:
7783                                         msg.append(2*indent)
7784                                         msg.append("(and %d more)\n" % omitted_parents)
7785
7786                                 msg.append("\n")
7787
7788                         sys.stderr.write("".join(msg))
7789                         sys.stderr.flush()
7790
7791                 if "--quiet" not in self.myopts:
7792                         show_blocker_docs_link()
7793
7794         def display(self, mylist, favorites=[], verbosity=None):
7795
7796                 # This is used to prevent display_problems() from
7797                 # redundantly displaying this exact same merge list
7798                 # again via _show_merge_list().
7799                 self._displayed_list = mylist
7800
7801                 if verbosity is None:
7802                         verbosity = ("--quiet" in self.myopts and 1 or \
7803                                 "--verbose" in self.myopts and 3 or 2)
7804                 favorites_set = InternalPackageSet(favorites)
7805                 oneshot = "--oneshot" in self.myopts or \
7806                         "--onlydeps" in self.myopts
7807                 columns = "--columns" in self.myopts
7808                 changelogs=[]
7809                 p=[]
7810                 blockers = []
7811
7812                 counters = PackageCounters()
7813
7814                 if verbosity == 1 and "--verbose" not in self.myopts:
7815                         def create_use_string(*args):
7816                                 return ""
7817                 else:
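                             # Build the flag display string (e.g. USE="...") for a
                             # package.  "*" marks a flag whose state changed relative
                             # to the installed version, "%" marks a flag added to or
                             # dropped from IUSE, and parentheses mark flags that are
                             # forced or no longer in IUSE.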
7818                         def create_use_string(name, cur_iuse, iuse_forced, cur_use,
7819                                 old_iuse, old_use,
7820                                 is_new, reinst_flags,
7821                                 all_flags=(verbosity == 3 or "--quiet" in self.myopts),
7822                                 alphabetical=("--alphabetical" in self.myopts)):
7823                                 enabled = []
7824                                 if alphabetical:
7825                                         disabled = enabled
7826                                         removed = enabled
7827                                 else:
7828                                         disabled = []
7829                                         removed = []
7830                                 cur_iuse = set(cur_iuse)
7831                                 enabled_flags = cur_iuse.intersection(cur_use)
7832                                 removed_iuse = set(old_iuse).difference(cur_iuse)
7833                                 any_iuse = cur_iuse.union(old_iuse)
7834                                 any_iuse = list(any_iuse)
7835                                 any_iuse.sort()
7836                                 for flag in any_iuse:
7837                                         flag_str = None
7838                                         isEnabled = False
7839                                         reinst_flag = reinst_flags and flag in reinst_flags
7840                                         if flag in enabled_flags:
7841                                                 isEnabled = True
7842                                                 if is_new or flag in old_use and \
7843                                                         (all_flags or reinst_flag):
7844                                                         flag_str = red(flag)
7845                                                 elif flag not in old_iuse:
7846                                                         flag_str = yellow(flag) + "%*"
7847                                                 elif flag not in old_use:
7848                                                         flag_str = green(flag) + "*"
7849                                         elif flag in removed_iuse:
7850                                                 if all_flags or reinst_flag:
7851                                                         flag_str = yellow("-" + flag) + "%"
7852                                                         if flag in old_use:
7853                                                                 flag_str += "*"
7854                                                         flag_str = "(" + flag_str + ")"
7855                                                         removed.append(flag_str)
7856                                                 continue
7857                                         else:
7858                                                 if is_new or flag in old_iuse and \
7859                                                         flag not in old_use and \
7860                                                         (all_flags or reinst_flag):
7861                                                         flag_str = blue("-" + flag)
7862                                                 elif flag not in old_iuse:
7863                                                         flag_str = yellow("-" + flag)
7864                                                         if flag not in iuse_forced:
7865                                                                 flag_str += "%"
7866                                                 elif flag in old_use:
7867                                                         flag_str = green("-" + flag) + "*"
7868                                         if flag_str:
7869                                                 if flag in iuse_forced:
7870                                                         flag_str = "(" + flag_str + ")"
7871                                                 if isEnabled:
7872                                                         enabled.append(flag_str)
7873                                                 else:
7874                                                         disabled.append(flag_str)
7875
7876                                 if alphabetical:
7877                                         ret = " ".join(enabled)
7878                                 else:
7879                                         ret = " ".join(enabled + disabled + removed)
7880                                 if ret:
7881                                         ret = '%s="%s" ' % (name, ret)
7882                                 return ret
7883
7884                 repo_display = RepoDisplay(self.roots)
7885
7886                 tree_nodes = []
7887                 display_list = []
7888                 mygraph = self.digraph.copy()
7889
7890                 # If there are any Uninstall instances, add the corresponding
7891                 # blockers to the digraph (useful for --tree display).
7892
7893                 executed_uninstalls = set(node for node in mylist \
7894                         if isinstance(node, Package) and node.operation == "unmerge")
7895
7896                 for uninstall in self._blocker_uninstalls.leaf_nodes():
7897                         uninstall_parents = \
7898                                 self._blocker_uninstalls.parent_nodes(uninstall)
7899                         if not uninstall_parents:
7900                                 continue
7901
7902                         # Remove the corresponding "nomerge" node and substitute
7903                         # the Uninstall node.
7904                         inst_pkg = self._pkg_cache[
7905                                 ("installed", uninstall.root, uninstall.cpv, "nomerge")]
7906                         try:
7907                                 mygraph.remove(inst_pkg)
7908                         except KeyError:
7909                                 pass
7910
7911                         try:
7912                                 inst_pkg_blockers = self._blocker_parents.child_nodes(inst_pkg)
7913                         except KeyError:
7914                                 inst_pkg_blockers = []
7915
7916                         # Break the Package -> Uninstall edges.
7917                         mygraph.remove(uninstall)
7918
7919                         # Resolution of a package's blockers
7920                         # depends on its own uninstallation.
7921                         for blocker in inst_pkg_blockers:
7922                                 mygraph.add(uninstall, blocker)
7923
7924                         # Expand Package -> Uninstall edges into
7925                         # Package -> Blocker -> Uninstall edges.
7926                         for blocker in uninstall_parents:
7927                                 mygraph.add(uninstall, blocker)
7928                                 for parent in self._blocker_parents.parent_nodes(blocker):
7929                                         if parent != inst_pkg:
7930                                                 mygraph.add(blocker, parent)
7931
7932                         # If the uninstall task did not need to be executed because
7933                         # of an upgrade, display Blocker -> Upgrade edges since the
7934                         # corresponding Blocker -> Uninstall edges will not be shown.
7935                         upgrade_node = \
7936                                 self._slot_pkg_map[uninstall.root].get(uninstall.slot_atom)
7937                         if upgrade_node is not None and \
7938                                 uninstall not in executed_uninstalls:
7939                                 for blocker in uninstall_parents:
7940                                         mygraph.add(upgrade_node, blocker)
7941
7942                 unsatisfied_blockers = []
7943                 i = 0
7944                 depth = 0
7945                 shown_edges = set()
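                    # Convert the flat merge list into (node, depth, ordered)
                    # tuples.  In --tree mode, ancestors that are not part of the
                    # ordered merge list are filled in as unordered entries so
                    # that the tree structure can be displayed.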
7946                 for x in mylist:
7947                         if isinstance(x, Blocker) and not x.satisfied:
7948                                 unsatisfied_blockers.append(x)
7949                                 continue
7950                         graph_key = x
7951                         if "--tree" in self.myopts:
7952                                 depth = len(tree_nodes)
7953                                 while depth and graph_key not in \
7954                                         mygraph.child_nodes(tree_nodes[depth-1]):
7955                                                 depth -= 1
7956                                 if depth:
7957                                         tree_nodes = tree_nodes[:depth]
7958                                         tree_nodes.append(graph_key)
7959                                         display_list.append((x, depth, True))
7960                                         shown_edges.add((graph_key, tree_nodes[depth-1]))
7961                                 else:
7962                                         traversed_nodes = set() # prevent endless cycles
7963                                         traversed_nodes.add(graph_key)
7964                                         def add_parents(current_node, ordered):
7965                                                 parent_nodes = None
7966                                                 # Do not traverse to parents if this node is an
7967                                                 # argument or a direct member of a set that has
7968                                                 # been specified as an argument (system or world).
7969                                                 if current_node not in self._set_nodes:
7970                                                         parent_nodes = mygraph.parent_nodes(current_node)
7971                                                 if parent_nodes:
7972                                                         child_nodes = set(mygraph.child_nodes(current_node))
7973                                                         selected_parent = None
7974                                                         # First, try to avoid a direct cycle.
7975                                                         for node in parent_nodes:
7976                                                                 if not isinstance(node, (Blocker, Package)):
7977                                                                         continue
7978                                                                 if node not in traversed_nodes and \
7979                                                                         node not in child_nodes:
7980                                                                         edge = (current_node, node)
7981                                                                         if edge in shown_edges:
7982                                                                                 continue
7983                                                                         selected_parent = node
7984                                                                         break
7985                                                         if not selected_parent:
7986                                                                 # A direct cycle is unavoidable.
7987                                                                 for node in parent_nodes:
7988                                                                         if not isinstance(node, (Blocker, Package)):
7989                                                                                 continue
7990                                                                         if node not in traversed_nodes:
7991                                                                                 edge = (current_node, node)
7992                                                                                 if edge in shown_edges:
7993                                                                                         continue
7994                                                                                 selected_parent = node
7995                                                                                 break
7996                                                         if selected_parent:
7997                                                                 shown_edges.add((current_node, selected_parent))
7998                                                                 traversed_nodes.add(selected_parent)
7999                                                                 add_parents(selected_parent, False)
8000                                                 display_list.append((current_node,
8001                                                         len(tree_nodes), ordered))
8002                                                 tree_nodes.append(current_node)
8003                                         tree_nodes = []
8004                                         add_parents(graph_key, True)
8005                         else:
8006                                 display_list.append((x, depth, True))
8007                 mylist = display_list
8008                 for x in unsatisfied_blockers:
8009                         mylist.append((x, 0, True))
8010
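                    # Prune the filled-in tree entries: walk the list backwards
                    # and drop duplicate or "nomerge" ancestors that do not lead
                    # down to a node that will actually be merged.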
8011                 last_merge_depth = 0
8012                 for i in xrange(len(mylist)-1,-1,-1):
8013                         graph_key, depth, ordered = mylist[i]
8014                         if not ordered and depth == 0 and i > 0 \
8015                                 and graph_key == mylist[i-1][0] and \
8016                                 mylist[i-1][1] == 0:
8017                                 # An ordered node got a consecutive duplicate when the tree was
8018                                 # being filled in.
8019                                 del mylist[i]
8020                                 continue
8021                         if ordered and graph_key[-1] != "nomerge":
8022                                 last_merge_depth = depth
8023                                 continue
8024                         if depth >= last_merge_depth or \
8025                                 i < len(mylist) - 1 and \
8026                                 depth >= mylist[i+1][1]:
8027                                         del mylist[i]
8028
8029                 from portage import flatten
8030                 from portage.dep import use_reduce, paren_reduce
8031                 # files-to-fetch list - avoids counting the same file twice
8032                 # in size display (verbose mode)
8033                 myfetchlist=[]
8034
8035                 # Use this set to detect when all the "repoadd" strings are "[0]"
8036                 # and disable the entire repo display in this case.
8037                 repoadd_set = set()
8038
8039                 for mylist_index in xrange(len(mylist)):
8040                         x, depth, ordered = mylist[mylist_index]
8041                         pkg_type = x[0]
8042                         myroot = x[1]
8043                         pkg_key = x[2]
8044                         portdb = self.trees[myroot]["porttree"].dbapi
8045                         bindb  = self.trees[myroot]["bintree"].dbapi
8046                         vardb = self.trees[myroot]["vartree"].dbapi
8047                         vartree = self.trees[myroot]["vartree"]
8048                         pkgsettings = self.pkgsettings[myroot]
8049
8050                         fetch=" "
8051                         indent = " " * depth
8052
8053                         if isinstance(x, Blocker):
8054                                 if x.satisfied:
8055                                         blocker_style = "PKG_BLOCKER_SATISFIED"
8056                                         addl = "%s  %s  " % (colorize(blocker_style, "b"), fetch)
8057                                 else:
8058                                         blocker_style = "PKG_BLOCKER"
8059                                         addl = "%s  %s  " % (colorize(blocker_style, "B"), fetch)
8060                                 if ordered:
8061                                         counters.blocks += 1
8062                                         if x.satisfied:
8063                                                 counters.blocks_satisfied += 1
8064                                 resolved = portage.key_expand(
8065                                         str(x.atom).lstrip("!"), mydb=vardb, settings=pkgsettings)
8066                                 if "--columns" in self.myopts and "--quiet" in self.myopts:
8067                                         addl += " " + colorize(blocker_style, resolved)
8068                                 else:
8069                                         addl = "[%s %s] %s%s" % \
8070                                                 (colorize(blocker_style, "blocks"),
8071                                                 addl, indent, colorize(blocker_style, resolved))
8072                                 block_parents = self._blocker_parents.parent_nodes(x)
8073                                 block_parents = set([pnode[2] for pnode in block_parents])
8074                                 block_parents = ", ".join(block_parents)
8075                                 if resolved!=x[2]:
8076                                         addl += colorize(blocker_style,
8077                                                 " (\"%s\" is blocking %s)") % \
8078                                                 (str(x.atom).lstrip("!"), block_parents)
8079                                 else:
8080                                         addl += colorize(blocker_style,
8081                                                 " (is blocking %s)") % block_parents
8082                                 if isinstance(x, Blocker) and x.satisfied:
8083                                         if columns:
8084                                                 continue
8085                                         p.append(addl)
8086                                 else:
8087                                         blockers.append(addl)
8088                         else:
8089                                 pkg_status = x[3]
8090                                 pkg_merge = ordered and pkg_status == "merge"
8091                                 if not pkg_merge and pkg_status == "merge":
8092                                         pkg_status = "nomerge"
8093                                 built = pkg_type != "ebuild"
8094                                 installed = pkg_type == "installed"
8095                                 pkg = x
8096                                 metadata = pkg.metadata
8097                                 ebuild_path = None
8098                                 repo_name = metadata["repository"]
8099                                 if pkg_type == "ebuild":
8100                                         ebuild_path = portdb.findname(pkg_key)
8101                                         if not ebuild_path: # shouldn't happen
8102                                                 raise portage.exception.PackageNotFound(pkg_key)
8103                                         repo_path_real = os.path.dirname(os.path.dirname(
8104                                                 os.path.dirname(ebuild_path)))
8105                                 else:
8106                                         repo_path_real = portdb.getRepositoryPath(repo_name)
8107                                 pkg_use = list(pkg.use.enabled)
8108                                 try:
8109                                         restrict = flatten(use_reduce(paren_reduce(
8110                                                 pkg.metadata["RESTRICT"]), uselist=pkg_use))
8111                                 except portage.exception.InvalidDependString, e:
8112                                         if not pkg.installed:
8113                                                 show_invalid_depstring_notice(x,
8114                                                         pkg.metadata["RESTRICT"], str(e))
8115                                                 del e
8116                                                 return 1
8117                                         restrict = []
8118                                 if "ebuild" == pkg_type and x[3] != "nomerge" and \
8119                                         "fetch" in restrict:
8120                                         fetch = red("F")
8121                                         if ordered:
8122                                                 counters.restrict_fetch += 1
8123                                         if portdb.fetch_check(pkg_key, pkg_use):
8124                                                 fetch = green("f")
8125                                                 if ordered:
8126                                                         counters.restrict_fetch_satisfied += 1
8127
8128                                 # We need to test for "--emptytree" here rather than for the "empty" param, because the "empty"
8129                                 # param is used for -u, where you still *do* want to see when something is being upgraded.
8130                                 myoldbest = []
8131                                 myinslotlist = None
8132                                 installed_versions = vardb.match(portage.cpv_getkey(pkg_key))
8133                                 if vardb.cpv_exists(pkg_key):
8134                                         addl="  "+yellow("R")+fetch+"  "
8135                                         if ordered:
8136                                                 if pkg_merge:
8137                                                         counters.reinst += 1
8138                                                 elif pkg_status == "uninstall":
8139                                                         counters.uninst += 1
8140                                 # filter out old-style virtual matches
8141                                 elif installed_versions and \
8142                                         portage.cpv_getkey(installed_versions[0]) == \
8143                                         portage.cpv_getkey(pkg_key):
8144                                         myinslotlist = vardb.match(pkg.slot_atom)
8145                                         # If this is the first install of a new-style virtual, we
8146                                         # need to filter out old-style virtual matches.
8147                                         if myinslotlist and \
8148                                                 portage.cpv_getkey(myinslotlist[0]) != \
8149                                                 portage.cpv_getkey(pkg_key):
8150                                                 myinslotlist = None
8151                                         if myinslotlist:
8152                                                 myoldbest = myinslotlist[:]
8153                                                 addl = "   " + fetch
8154                                                 if not portage.dep.cpvequal(pkg_key,
8155                                                         portage.best([pkg_key] + myoldbest)):
8156                                                         # Downgrade in slot
8157                                                         addl += turquoise("U")+blue("D")
8158                                                         if ordered:
8159                                                                 counters.downgrades += 1
8160                                                 else:
8161                                                         # Update in slot
8162                                                         addl += turquoise("U") + " "
8163                                                         if ordered:
8164                                                                 counters.upgrades += 1
8165                                         else:
8166                                                 # New slot, mark it new.
8167                                                 addl = " " + green("NS") + fetch + "  "
8168                                                 myoldbest = vardb.match(portage.cpv_getkey(pkg_key))
8169                                                 if ordered:
8170                                                         counters.newslot += 1
8171
8172                                         if "--changelog" in self.myopts:
8173                                                 inst_matches = vardb.match(pkg.slot_atom)
8174                                                 if inst_matches:
8175                                                         changelogs.extend(self.calc_changelog(
8176                                                                 portdb.findname(pkg_key),
8177                                                                 inst_matches[0], pkg_key))
8178                                 else:
8179                                         addl = " " + green("N") + " " + fetch + "  "
8180                                         if ordered:
8181                                                 counters.new += 1
8182
8183                                 verboseadd = ""
8184                                 repoadd = None
8185
8186                                 if True:
8187                                         # USE flag display
8188                                         forced_flags = set()
8189                                         pkgsettings.setcpv(pkg) # for package.use.{mask,force}
8190                                         forced_flags.update(pkgsettings.useforce)
8191                                         forced_flags.update(pkgsettings.usemask)
8192
8193                                         cur_use = [flag for flag in pkg.use.enabled \
8194                                                 if flag in pkg.iuse.all]
8195                                         cur_iuse = sorted(pkg.iuse.all)
8196
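                                             # Determine the installed instance in the same slot (if any)
                                             # whose USE/IUSE will be compared against for the USE flag
                                             # display below.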
8197                                         if myoldbest and myinslotlist:
8198                                                 previous_cpv = myoldbest[0]
8199                                         else:
8200                                                 previous_cpv = pkg.cpv
8201                                         if vardb.cpv_exists(previous_cpv):
8202                                                 old_iuse, old_use = vardb.aux_get(
8203                                                                 previous_cpv, ["IUSE", "USE"])
8204                                                 old_iuse = list(set(
8205                                                         filter_iuse_defaults(old_iuse.split())))
8206                                                 old_iuse.sort()
8207                                                 old_use = old_use.split()
8208                                                 is_new = False
8209                                         else:
8210                                                 old_iuse = []
8211                                                 old_use = []
8212                                                 is_new = True
8213
8214                                         old_use = [flag for flag in old_use if flag in old_iuse]
8215
8216                                         use_expand = pkgsettings["USE_EXPAND"].lower().split()
8217                                         use_expand.sort()
8218                                         use_expand.reverse()
8219                                         use_expand_hidden = \
8220                                                 pkgsettings["USE_EXPAND_HIDDEN"].lower().split()
8221
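                                             # Helper: split a flat list of USE flags into a dict keyed by
                                             # USE_EXPAND variable (with the prefix stripped), leaving the
                                             # remaining plain flags under "USE". Optionally also return the
                                             # subset of forced flags and drop USE_EXPAND_HIDDEN entries.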
8222                                         def map_to_use_expand(myvals, forcedFlags=False,
8223                                                 removeHidden=True):
8224                                                 ret = {}
8225                                                 forced = {}
8226                                                 for exp in use_expand:
8227                                                         ret[exp] = []
8228                                                         forced[exp] = set()
8229                                                         for val in myvals[:]:
8230                                                                 if val.startswith(exp.lower()+"_"):
8231                                                                         if val in forced_flags:
8232                                                                                 forced[exp].add(val[len(exp)+1:])
8233                                                                         ret[exp].append(val[len(exp)+1:])
8234                                                                         myvals.remove(val)
8235                                                 ret["USE"] = myvals
8236                                                 forced["USE"] = [val for val in myvals \
8237                                                         if val in forced_flags]
8238                                                 if removeHidden:
8239                                                         for exp in use_expand_hidden:
8240                                                                 ret.pop(exp, None)
8241                                                 if forcedFlags:
8242                                                         return ret, forced
8243                                                 return ret
8244
8245                                         # Prevent USE_EXPAND_HIDDEN flags from being hidden if they
8246                                         # are the only thing that triggered reinstallation.
8247                                         reinst_flags_map = {}
8248                                         reinstall_for_flags = self._reinstall_nodes.get(pkg)
8249                                         reinst_expand_map = None
8250                                         if reinstall_for_flags:
8251                                                 reinst_flags_map = map_to_use_expand(
8252                                                         list(reinstall_for_flags), removeHidden=False)
8253                                                 for k in list(reinst_flags_map):
8254                                                         if not reinst_flags_map[k]:
8255                                                                 del reinst_flags_map[k]
8256                                                 if not reinst_flags_map.get("USE"):
8257                                                         reinst_expand_map = reinst_flags_map.copy()
8258                                                         reinst_expand_map.pop("USE", None)
8259                                         if reinst_expand_map and \
8260                                                 not set(reinst_expand_map).difference(
8261                                                 use_expand_hidden):
8262                                                 use_expand_hidden = \
8263                                                         set(use_expand_hidden).difference(
8264                                                         reinst_expand_map)
8265
8266                                         cur_iuse_map, iuse_forced = \
8267                                                 map_to_use_expand(cur_iuse, forcedFlags=True)
8268                                         cur_use_map = map_to_use_expand(cur_use)
8269                                         old_iuse_map = map_to_use_expand(old_iuse)
8270                                         old_use_map = map_to_use_expand(old_use)
8271
8272                                         use_expand.sort()
8273                                         use_expand.insert(0, "USE")
8274
8275                                         for key in use_expand:
8276                                                 if key in use_expand_hidden:
8277                                                         continue
8278                                                 verboseadd += create_use_string(key.upper(),
8279                                                         cur_iuse_map[key], iuse_forced[key],
8280                                                         cur_use_map[key], old_iuse_map[key],
8281                                                         old_use_map[key], is_new,
8282                                                         reinst_flags_map.get(key))
8283
8284                                 if verbosity == 3:
8285                                         # size verbose
8286                                         mysize=0
8287                                         if pkg_type == "ebuild" and pkg_merge:
8288                                                 try:
8289                                                         myfilesdict = portdb.getfetchsizes(pkg_key,
8290                                                                 useflags=pkg_use, debug=self.edebug)
8291                                                 except portage.exception.InvalidDependString, e:
8292                                                         src_uri = portdb.aux_get(pkg_key, ["SRC_URI"])[0]
8293                                                         show_invalid_depstring_notice(x, src_uri, str(e))
8294                                                         del e
8295                                                         return 1
8296                                                 if myfilesdict is None:
8297                                                         myfilesdict="[empty/missing/bad digest]"
8298                                                 else:
8299                                                         for myfetchfile in myfilesdict:
8300                                                                 if myfetchfile not in myfetchlist:
8301                                                                         mysize+=myfilesdict[myfetchfile]
8302                                                                         myfetchlist.append(myfetchfile)
8303                                                         if ordered:
8304                                                                 counters.totalsize += mysize
8305                                                 verboseadd += format_size(mysize)
8306
8307                                         # overlay verbose
8308                                         # assign index for a previous version in the same slot
8309                                         has_previous = False
8310                                         repo_name_prev = None
8311                                         slot_atom = "%s:%s" % (portage.dep_getkey(pkg_key),
8312                                                 metadata["SLOT"])
8313                                         slot_matches = vardb.match(slot_atom)
8314                                         if slot_matches:
8315                                                 has_previous = True
8316                                                 repo_name_prev = vardb.aux_get(slot_matches[0],
8317                                                         ["repository"])[0]
8318
8319                                         # now use the data to generate output
8320                                         if pkg.installed or not has_previous:
8321                                                 repoadd = repo_display.repoStr(repo_path_real)
8322                                         else:
8323                                                 repo_path_prev = None
8324                                                 if repo_name_prev:
8325                                                         repo_path_prev = portdb.getRepositoryPath(
8326                                                                 repo_name_prev)
8327                                                 if repo_path_prev == repo_path_real:
8328                                                         repoadd = repo_display.repoStr(repo_path_real)
8329                                                 else:
8330                                                         repoadd = "%s=>%s" % (
8331                                                                 repo_display.repoStr(repo_path_prev),
8332                                                                 repo_display.repoStr(repo_path_real))
8333                                         if repoadd:
8334                                                 repoadd_set.add(repoadd)
8335
8336                                 xs = [portage.cpv_getkey(pkg_key)] + \
8337                                         list(portage.catpkgsplit(pkg_key)[2:])
8338                                 if xs[2] == "r0":
8339                                         xs[2] = ""
8340                                 else:
8341                                         xs[2] = "-" + xs[2]
8342
8343                                 mywidth = 130
8344                                 if "COLUMNWIDTH" in self.settings:
8345                                         try:
8346                                                 mywidth = int(self.settings["COLUMNWIDTH"])
8347                                         except ValueError, e:
8348                                                 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
8349                                                 portage.writemsg(
8350                                                         "!!! Unable to parse COLUMNWIDTH='%s'\n" % \
8351                                                         self.settings["COLUMNWIDTH"], noiselevel=-1)
8352                                                 del e
8353                                 oldlp = mywidth - 30
8354                                 newlp = oldlp - 30
8355
8356                                 # Convert myoldbest from a list to a string.
8357                                 if not myoldbest:
8358                                         myoldbest = ""
8359                                 else:
8360                                         for pos, key in enumerate(myoldbest):
8361                                                 key = portage.catpkgsplit(key)[2] + \
8362                                                         "-" + portage.catpkgsplit(key)[3]
8363                                                 if key[-3:] == "-r0":
8364                                                         key = key[:-3]
8365                                                 myoldbest[pos] = key
8366                                         myoldbest = blue("["+", ".join(myoldbest)+"]")
8367
8368                                 pkg_cp = xs[0]
8369                                 root_config = self.roots[myroot]
8370                                 system_set = root_config.sets["system"]
8371                                 world_set  = root_config.sets["world"]
8372
8373                                 pkg_system = False
8374                                 pkg_world = False
8375                                 try:
8376                                         pkg_system = system_set.findAtomForPackage(pkg)
8377                                         pkg_world  = world_set.findAtomForPackage(pkg)
8378                                         if not (oneshot or pkg_world) and \
8379                                                 myroot == self.target_root and \
8380                                                 favorites_set.findAtomForPackage(pkg):
8381                                                 # Maybe it will be added to world now.
8382                                                 if create_world_atom(pkg, favorites_set, root_config):
8383                                                         pkg_world = True
8384                                 except portage.exception.InvalidDependString:
8385                                         # This is reported elsewhere if relevant.
8386                                         pass
8387
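                                     # Colorize a package string according to its status (merge,
                                     # nomerge, or uninstall) and its system/world membership.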
8388                                 def pkgprint(pkg_str):
8389                                         if pkg_merge:
8390                                                 if pkg_system:
8391                                                         return colorize("PKG_MERGE_SYSTEM", pkg_str)
8392                                                 elif pkg_world:
8393                                                         return colorize("PKG_MERGE_WORLD", pkg_str)
8394                                                 else:
8395                                                         return colorize("PKG_MERGE", pkg_str)
8396                                         elif pkg_status == "uninstall":
8397                                                 return colorize("PKG_UNINSTALL", pkg_str)
8398                                         else:
8399                                                 if pkg_system:
8400                                                         return colorize("PKG_NOMERGE_SYSTEM", pkg_str)
8401                                                 elif pkg_world:
8402                                                         return colorize("PKG_NOMERGE_WORLD", pkg_str)
8403                                                 else:
8404                                                         return colorize("PKG_NOMERGE", pkg_str)
8405
8406                                 try:
8407                                         properties = flatten(use_reduce(paren_reduce(
8408                                                 pkg.metadata["PROPERTIES"]), uselist=pkg.use.enabled))
8409                                 except portage.exception.InvalidDependString, e:
8410                                         if not pkg.installed:
8411                                                 show_invalid_depstring_notice(pkg,
8412                                                         pkg.metadata["PROPERTIES"], str(e))
8413                                                 del e
8414                                                 return 1
8415                                         properties = []
8416                                 interactive = "interactive" in properties
8417                                 if interactive and pkg.operation == "merge":
8418                                         addl = colorize("WARN", "I") + addl[1:]
8419                                         if ordered:
8420                                                 counters.interactive += 1
8421
8422                                 if x[1]!="/":
8423                                         if myoldbest:
8424                                                 myoldbest +=" "
8425                                         if "--columns" in self.myopts:
8426                                                 if "--quiet" in self.myopts:
8427                                                         myprint=addl+" "+indent+pkgprint(pkg_cp)
8428                                                         myprint=myprint+darkblue(" "+xs[1]+xs[2])+" "
8429                                                         myprint=myprint+myoldbest
8430                                                         myprint=myprint+darkgreen("to "+x[1])
8431                                                         verboseadd = None
8432                                                 else:
8433                                                         if not pkg_merge:
8434                                                                 myprint = "[%s] %s%s" % \
8435                                                                         (pkgprint(pkg_status.ljust(13)),
8436                                                                         indent, pkgprint(pkg.cp))
8437                                                         else:
8438                                                                 myprint = "[%s %s] %s%s" % \
8439                                                                         (pkgprint(pkg.type_name), addl,
8440                                                                         indent, pkgprint(pkg.cp))
8441                                                         if (newlp-nc_len(myprint)) > 0:
8442                                                                 myprint=myprint+(" "*(newlp-nc_len(myprint)))
8443                                                         myprint=myprint+"["+darkblue(xs[1]+xs[2])+"] "
8444                                                         if (oldlp-nc_len(myprint)) > 0:
8445                                                                 myprint=myprint+" "*(oldlp-nc_len(myprint))
8446                                                         myprint=myprint+myoldbest
8447                                                         myprint += darkgreen("to " + pkg.root)
8448                                         else:
8449                                                 if not pkg_merge:
8450                                                         myprint = "[%s] " % pkgprint(pkg_status.ljust(13))
8451                                                 else:
8452                                                         myprint = "[%s %s] " % (pkgprint(pkg_type), addl)
8453                                                 myprint += indent + pkgprint(pkg_key) + " " + \
8454                                                         myoldbest + darkgreen("to " + myroot)
8455                                 else:
8456                                         if "--columns" in self.myopts:
8457                                                 if "--quiet" in self.myopts:
8458                                                         myprint=addl+" "+indent+pkgprint(pkg_cp)
8459                                                         myprint=myprint+" "+green(xs[1]+xs[2])+" "
8460                                                         myprint=myprint+myoldbest
8461                                                         verboseadd = None
8462                                                 else:
8463                                                         if not pkg_merge:
8464                                                                 myprint = "[%s] %s%s" % \
8465                                                                         (pkgprint(pkg_status.ljust(13)),
8466                                                                         indent, pkgprint(pkg.cp))
8467                                                         else:
8468                                                                 myprint = "[%s %s] %s%s" % \
8469                                                                         (pkgprint(pkg.type_name), addl,
8470                                                                         indent, pkgprint(pkg.cp))
8471                                                         if (newlp-nc_len(myprint)) > 0:
8472                                                                 myprint=myprint+(" "*(newlp-nc_len(myprint)))
8473                                                         myprint=myprint+green(" ["+xs[1]+xs[2]+"] ")
8474                                                         if (oldlp-nc_len(myprint)) > 0:
8475                                                                 myprint=myprint+(" "*(oldlp-nc_len(myprint)))
8476                                                         myprint += myoldbest
8477                                         else:
8478                                                 if not pkg_merge:
8479                                                         myprint = "[%s] %s%s %s" % \
8480                                                                 (pkgprint(pkg_status.ljust(13)),
8481                                                                 indent, pkgprint(pkg.cpv),
8482                                                                 myoldbest)
8483                                                 else:
8484                                                         myprint = "[%s %s] %s%s %s" % \
8485                                                                 (pkgprint(pkg_type), addl, indent,
8486                                                                 pkgprint(pkg.cpv), myoldbest)
8487
8488                                 if columns and pkg.operation == "uninstall":
8489                                         continue
8490                                 p.append((myprint, verboseadd, repoadd))
8491
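                                     # If a new version of portage itself is scheduled in the middle of
                                     # the merge list, warn that emerge will stop, reload itself, and
                                     # then resume the remaining merges.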
8492                                 if "--tree" not in self.myopts and \
8493                                         "--quiet" not in self.myopts and \
8494                                         not self._opts_no_restart.intersection(self.myopts) and \
8495                                         pkg.root == self._running_root.root and \
8496                                         portage.match_from_list(
8497                                         portage.const.PORTAGE_PACKAGE_ATOM, [pkg]) and \
8498                                         not vardb.cpv_exists(pkg.cpv) and \
8499                                         "--quiet" not in self.myopts:
8500                                                 if mylist_index < len(mylist) - 1:
8501                                                         p.append(colorize("WARN", "*** Portage will stop merging at this point and reload itself,"))
8502                                                         p.append(colorize("WARN", "    then resume the merge."))
8503
8504                 out = sys.stdout
8505                 show_repos = repoadd_set and repoadd_set != set(["0"])
8506
8507                 for x in p:
8508                         if isinstance(x, basestring):
8509                                 out.write("%s\n" % (x,))
8510                                 continue
8511
8512                         myprint, verboseadd, repoadd = x
8513
8514                         if verboseadd:
8515                                 myprint += " " + verboseadd
8516
8517                         if show_repos and repoadd:
8518                                 myprint += " " + teal("[%s]" % repoadd)
8519
8520                         out.write("%s\n" % (myprint,))
8521
8522                 for x in blockers:
8523                         print x
8524
8525                 if verbosity == 3:
8526                         print
8527                         print counters
8528                         if show_repos:
8529                                 sys.stdout.write(str(repo_display))
8530
8531                 if "--changelog" in self.myopts:
8532                         print
8533                         for revision,text in changelogs:
8534                                 print bold('*'+revision)
8535                                 sys.stdout.write(text)
8536
8537                 sys.stdout.flush()
8538                 return os.EX_OK
8539
8540         def display_problems(self):
8541                 """
8542                 Display problems with the dependency graph such as slot collisions.
8543                 This is called internally by display() to show the problems _after_
8544                 the merge list where it is most likely to be seen, but if display()
8545                 is not going to be called then this method should be called explicitly
8546                 to ensure that the user is notified of problems with the graph.
8547
8548                 All output goes to stderr, except for unsatisfied dependencies which
8549                 go to stdout for parsing by programs such as autounmask.
8550                 """
8551
8552                 # Note that show_masked_packages() sends its output to
8553                 # stdout, and some programs such as autounmask parse the
8554                 # output in cases when emerge bails out. However, when
8555                 # show_masked_packages() is called for installed packages
8556                 # here, the message is a warning that is more appropriate
8557                 # to send to stderr, so temporarily redirect stdout to
8558                 # stderr. TODO: Fix output code so there's a cleaner way
8559                 # to redirect everything to stderr.
8560                 sys.stdout.flush()
8561                 sys.stderr.flush()
8562                 stdout = sys.stdout
8563                 try:
8564                         sys.stdout = sys.stderr
8565                         self._display_problems()
8566                 finally:
8567                         sys.stdout = stdout
8568                         sys.stdout.flush()
8569                         sys.stderr.flush()
8570
8571                 # This goes to stdout for parsing by programs like autounmask.
8572                 for pargs, kwargs in self._unsatisfied_deps_for_display:
8573                         self._show_unsatisfied_dep(*pargs, **kwargs)
8574
8575         def _display_problems(self):
8576                 if self._circular_deps_for_display is not None:
8577                         self._show_circular_deps(
8578                                 self._circular_deps_for_display)
8579
8580                 # The user is only notified of a slot conflict if
8581                 # there are no unresolvable blocker conflicts.
8582                 if self._unsatisfied_blockers_for_display is not None:
8583                         self._show_unsatisfied_blockers(
8584                                 self._unsatisfied_blockers_for_display)
8585                 else:
8586                         self._show_slot_collision_notice()
8587
8588                 # TODO: Add generic support for "set problem" handlers so that
8589                 # the below warnings aren't special cases for world only.
8590
8591                 if self._missing_args:
8592                         world_problems = False
8593                         if "world" in self._sets:
8594                                 # Filter out indirect members of world (from nested sets)
8595                                 # since only direct members of world are desired here.
8596                                 world_set = self.roots[self.target_root].sets["world"]
8597                                 for arg, atom in self._missing_args:
8598                                         if arg.name == "world" and atom in world_set:
8599                                                 world_problems = True
8600                                                 break
8601
8602                         if world_problems:
8603                                 sys.stderr.write("\n!!! Problems have been " + \
8604                                         "detected with your world file\n")
8605                                 sys.stderr.write("!!! Please run " + \
8606                                         green("emaint --check world")+"\n\n")
8607
8608                 if self._missing_args:
8609                         sys.stderr.write("\n" + colorize("BAD", "!!!") + \
8610                                 " Ebuilds for the following packages are either all\n")
8611                         sys.stderr.write(colorize("BAD", "!!!") + \
8612                                 " masked or don't exist:\n")
8613                         sys.stderr.write(" ".join(str(atom) for arg, atom in \
8614                                 self._missing_args) + "\n")
8615
8616                 if self._pprovided_args:
8617                         arg_refs = {}
8618                         for arg, atom in self._pprovided_args:
8619                                 if isinstance(arg, SetArg):
8620                                         parent = arg.name
8621                                         arg_atom = (atom, atom)
8622                                 else:
8623                                         parent = "args"
8624                                         arg_atom = (arg.arg, atom)
8625                                 refs = arg_refs.setdefault(arg_atom, [])
8626                                 if parent not in refs:
8627                                         refs.append(parent)
8628                         msg = []
8629                         msg.append(bad("\nWARNING: "))
8630                         if len(self._pprovided_args) > 1:
8631                                 msg.append("Requested packages will not be " + \
8632                                         "merged because they are listed in\n")
8633                         else:
8634                                 msg.append("A requested package will not be " + \
8635                                         "merged because it is listed in\n")
8636                         msg.append("package.provided:\n\n")
8637                         problems_sets = set()
8638                         for (arg, atom), refs in arg_refs.iteritems():
8639                                 ref_string = ""
8640                                 if refs:
8641                                         problems_sets.update(refs)
8642                                         refs.sort()
8643                                         ref_string = ", ".join(["'%s'" % name for name in refs])
8644                                         ref_string = " pulled in by " + ref_string
8645                                 msg.append("  %s%s\n" % (colorize("INFORM", str(arg)), ref_string))
8646                         msg.append("\n")
8647                         if "world" in problems_sets:
8648                                 msg.append("This problem can be solved in one of the following ways:\n\n")
8649                                 msg.append("  A) Use emaint to clean offending packages from world (if not installed).\n")
8650                                 msg.append("  B) Uninstall offending packages (cleans them from world).\n")
8651                                 msg.append("  C) Remove offending entries from package.provided.\n\n")
8652                                 msg.append("The best course of action depends on the reason that an offending\n")
8653                                 msg.append("package.provided entry exists.\n\n")
8654                         sys.stderr.write("".join(msg))
8655
8656                 masked_packages = []
8657                 for pkg in self._masked_installed:
8658                         root_config = pkg.root_config
8659                         pkgsettings = self.pkgsettings[pkg.root]
8660                         mreasons = get_masking_status(pkg, pkgsettings, root_config)
8661                         masked_packages.append((root_config, pkgsettings,
8662                                 pkg.cpv, pkg.metadata, mreasons))
8663                 if masked_packages:
8664                         sys.stderr.write("\n" + colorize("BAD", "!!!") + \
8665                                 " The following installed packages are masked:\n")
8666                         show_masked_packages(masked_packages)
8667                         show_mask_docs()
8668                         print
8669
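             # Collect the ChangeLog entries for versions newer than the installed
             # one, up to and including the version that is about to be merged.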
8670         def calc_changelog(self,ebuildpath,current,next):
8671                 if ebuildpath == None or not os.path.exists(ebuildpath):
8672                         return []
8673                 current = '-'.join(portage.catpkgsplit(current)[1:])
8674                 if current.endswith('-r0'):
8675                         current = current[:-3]
8676                 next = '-'.join(portage.catpkgsplit(next)[1:])
8677                 if next.endswith('-r0'):
8678                         next = next[:-3]
8679                 changelogpath = os.path.join(os.path.split(ebuildpath)[0],'ChangeLog')
8680                 try:
8681                         changelog = open(changelogpath).read()
8682                 except SystemExit, e:
8683                         raise # Needed else can't exit
8684                 except:
8685                         return []
8686                 divisions = self.find_changelog_tags(changelog)
8687                 #print 'XX from',current,'to',next
8688                 #for div,text in divisions: print 'XX',div
8689                 # skip entries for all revisions above the one we are about to emerge
8690                 for i in range(len(divisions)):
8691                         if divisions[i][0]==next:
8692                                 divisions = divisions[i:]
8693                                 break
8694                 # find out how many entries we are going to display
8695                 for i in range(len(divisions)):
8696                         if divisions[i][0]==current:
8697                                 divisions = divisions[:i]
8698                                 break
8699                 else:
8700                         # couldn't find the current revision in the list; display nothing
8701                         return []
8702                 return divisions
8703
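             # Split a ChangeLog into (version, text) tuples by scanning for
             # '* <version>' header lines, stripping any trailing '.ebuild' or
             # '-r0' suffix from the version.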
8704         def find_changelog_tags(self,changelog):
8705                 divs = []
8706                 release = None
8707                 while 1:
8708                         match = re.search(r'^\*\ ?([-a-zA-Z0-9_.+]*)(?:\ .*)?\n',changelog,re.M)
8709                         if match is None:
8710                                 if release is not None:
8711                                         divs.append((release,changelog))
8712                                 return divs
8713                         if release is not None:
8714                                 divs.append((release,changelog[:match.start()]))
8715                         changelog = changelog[match.end():]
8716                         release = match.group(1)
8717                         if release.endswith('.ebuild'):
8718                                 release = release[:-7]
8719                         if release.endswith('-r0'):
8720                                 release = release[:-3]
8721
8722         def saveNomergeFavorites(self):
8723                 """Find atoms in favorites that are not in the mergelist and add them
8724                 to the world file if necessary."""
8725                 for x in ("--buildpkgonly", "--fetchonly", "--fetch-all-uri",
8726                         "--oneshot", "--onlydeps", "--pretend"):
8727                         if x in self.myopts:
8728                                 return
8729                 root_config = self.roots[self.target_root]
8730                 world_set = root_config.sets["world"]
8731
8732                 world_locked = False
8733                 if hasattr(world_set, "lock"):
8734                         world_set.lock()
8735                         world_locked = True
8736
8737                 if hasattr(world_set, "load"):
8738                         world_set.load() # maybe it's changed on disk
8739
8740                 args_set = self._sets["args"]
8741                 portdb = self.trees[self.target_root]["porttree"].dbapi
8742                 added_favorites = set()
8743                 for x in self._set_nodes:
8744                         pkg_type, root, pkg_key, pkg_status = x
8745                         if pkg_status != "nomerge":
8746                                 continue
8747
8748                         try:
8749                                 myfavkey = create_world_atom(x, args_set, root_config)
8750                                 if myfavkey:
8751                                         if myfavkey in added_favorites:
8752                                                 continue
8753                                         added_favorites.add(myfavkey)
8754                         except portage.exception.InvalidDependString, e:
8755                                 writemsg("\n\n!!! '%s' has invalid PROVIDE: %s\n" % \
8756                                         (pkg_key, str(e)), noiselevel=-1)
8757                                 writemsg("!!! see '%s'\n\n" % os.path.join(
8758                                         root, portage.VDB_PATH, pkg_key, "PROVIDE"), noiselevel=-1)
8759                                 del e
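                     # Also record nested sets (other than "args" and "world") that are
                     # eligible world candidates and not already listed in the world file.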
8760                 all_added = []
8761                 for k in self._sets:
8762                         if k in ("args", "world") or not root_config.sets[k].world_candidate:
8763                                 continue
8764                         s = SETPREFIX + k
8765                         if s in world_set:
8766                                 continue
8767                         all_added.append(SETPREFIX + k)
8768                 all_added.extend(added_favorites)
8769                 all_added.sort()
8770                 for a in all_added:
8771                         print ">>> Recording %s in \"world\" favorites file..." % \
8772                                 colorize("INFORM", str(a))
8773                 if all_added:
8774                         world_set.update(all_added)
8775
8776                 if world_locked:
8777                         world_set.unlock()
8778
8779         def loadResumeCommand(self, resume_data, skip_masked=True,
8780                 skip_missing=True):
8781                 """
8782                 Add a resume command to the graph and validate it in the process.  This
8783                 will raise a PackageNotFound exception if a package is not available.
8784                 """
8785
8786                 if not isinstance(resume_data, dict):
8787                         return False
8788
8789                 mergelist = resume_data.get("mergelist")
8790                 if not isinstance(mergelist, list):
8791                         mergelist = []
8792
8793                 fakedb = self.mydbapi
8794                 trees = self.trees
8795                 serialized_tasks = []
8796                 masked_tasks = []
8797                 for x in mergelist:
8798                         if not (isinstance(x, list) and len(x) == 4):
8799                                 continue
8800                         pkg_type, myroot, pkg_key, action = x
8801                         if pkg_type not in self.pkg_tree_map:
8802                                 continue
8803                         if action != "merge":
8804                                 continue
8805                         tree_type = self.pkg_tree_map[pkg_type]
8806                         mydb = trees[myroot][tree_type].dbapi
8807                         db_keys = list(self._trees_orig[myroot][
8808                                 tree_type].dbapi._aux_cache_keys)
8809                         try:
8810                                 metadata = izip(db_keys, mydb.aux_get(pkg_key, db_keys))
8811                         except KeyError:
8812                                 # It does not exist or it is corrupt.
8813                                 if action == "uninstall":
8814                                         continue
8815                                 if skip_missing:
8816                                         # TODO: log these somewhere
8817                                         continue
8818                                 raise portage.exception.PackageNotFound(pkg_key)
8819                         installed = action == "uninstall"
8820                         built = pkg_type != "ebuild"
8821                         root_config = self.roots[myroot]
8822                         pkg = Package(built=built, cpv=pkg_key,
8823                                 installed=installed, metadata=metadata,
8824                                 operation=action, root_config=root_config,
8825                                 type_name=pkg_type)
8826                         if pkg_type == "ebuild":
8827                                 pkgsettings = self.pkgsettings[myroot]
8828                                 pkgsettings.setcpv(pkg)
8829                                 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
8830                                 pkg.metadata['CHOST'] = pkgsettings.get('CHOST', '')
8831                         self._pkg_cache[pkg] = pkg
8832
8833                         root_config = self.roots[pkg.root]
8834                         if "merge" == pkg.operation and \
8835                                 not visible(root_config.settings, pkg):
8836                                 if skip_masked:
8837                                         masked_tasks.append(Dependency(root=pkg.root, parent=pkg))
8838                                 else:
8839                                         self._unsatisfied_deps_for_display.append(
8840                                                 ((pkg.root, "="+pkg.cpv), {"myparent":None}))
8841
8842                         fakedb[myroot].cpv_inject(pkg)
8843                         serialized_tasks.append(pkg)
8844                         self.spinner.update()
8845
8846                 if self._unsatisfied_deps_for_display:
8847                         return False
8848
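                     # With --nodeps (or an empty resume list), reuse the serialized task
                     # list as-is instead of rebuilding the dependency graph.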
8849                 if not serialized_tasks or "--nodeps" in self.myopts:
8850                         self._serialized_tasks_cache = serialized_tasks
8851                         self._scheduler_graph = self.digraph
8852                 else:
8853                         self._select_package = self._select_pkg_from_graph
8854                         self.myparams.add("selective")
8855                         # Always traverse deep dependencies in order to account for
8856                         # potentially unsatisfied dependencies of installed packages.
8857                         # This is necessary for correct --keep-going or --resume operation
8858                         # in case a package from a group of circularly dependent packages
8859                         # fails. In this case, a package which has recently been installed
8860                         # may have an unsatisfied circular dependency (pulled in by
8861                         # PDEPEND, for example). So, even though a package is already
8862                         # installed, it may not have all of its dependencies satisfied, so
8863                         # it may not be usable. If such a package is in the subgraph of
8864                         # deep dependencies of a scheduled build, that build needs to
8865                         # be cancelled. In order for this type of situation to be
8866                         # recognized, deep traversal of dependencies is required.
8867                         self.myparams.add("deep")
8868
8869                         favorites = resume_data.get("favorites")
8870                         args_set = self._sets["args"]
8871                         if isinstance(favorites, list):
8872                                 args = self._load_favorites(favorites)
8873                         else:
8874                                 args = []
8875
8876                         for task in serialized_tasks:
8877                                 if isinstance(task, Package) and \
8878                                         task.operation == "merge":
8879                                         if not self._add_pkg(task, None):
8880                                                 return False
8881
8882                         # Packages for argument atoms need to be explicitly
8883                         # added via _add_pkg() so that they are included in the
8884                         # digraph (needed at least for --tree display).
8885                         for arg in args:
8886                                 for atom in arg.set:
8887                                         pkg, existing_node = self._select_package(
8888                                                 arg.root_config.root, atom)
8889                                         if existing_node is None and \
8890                                                 pkg is not None:
8891                                                 if not self._add_pkg(pkg, Dependency(atom=atom,
8892                                                         root=pkg.root, parent=arg)):
8893                                                         return False
8894
8895                         # Allow unsatisfied deps here to avoid showing a masking
8896                         # message for an unsatisfied dep that isn't necessarily
8897                         # masked.
8898                         if not self._create_graph(allow_unsatisfied=True):
8899                                 return False
8900
8901                         unsatisfied_deps = []
8902                         for dep in self._unsatisfied_deps:
8903                                 if not isinstance(dep.parent, Package):
8904                                         continue
8905                                 if dep.parent.operation == "merge":
8906                                         unsatisfied_deps.append(dep)
8907                                         continue
8908
8909                                 # For unsatisfied deps of installed packages, only account for
8910                                 # them if they are in the subgraph of dependencies of a package
8911                                 # which is scheduled to be installed.
8912                                 unsatisfied_install = False
8913                                 traversed = set()
8914                                 dep_stack = self.digraph.parent_nodes(dep.parent)
8915                                 while dep_stack:
8916                                         node = dep_stack.pop()
8917                                         if not isinstance(node, Package):
8918                                                 continue
8919                                         if node.operation == "merge":
8920                                                 unsatisfied_install = True
8921                                                 break
8922                                         if node in traversed:
8923                                                 continue
8924                                         traversed.add(node)
8925                                         dep_stack.extend(self.digraph.parent_nodes(node))
8926
8927                                 if unsatisfied_install:
8928                                         unsatisfied_deps.append(dep)
8929
8930                         if masked_tasks or unsatisfied_deps:
8931                                 # This probably means that a required package
8932                                 # was dropped via --skipfirst. It makes the
8933                                 # resume list invalid, so convert it to a
8934                                 # UnsatisfiedResumeDep exception.
8935                                 raise self.UnsatisfiedResumeDep(self,
8936                                         masked_tasks + unsatisfied_deps)
8937                         self._serialized_tasks_cache = None
8938                         try:
8939                                 self.altlist()
8940                         except self._unknown_internal_error:
8941                                 return False
8942
8943                 return True
8944
8945         def _load_favorites(self, favorites):
8946                 """
8947                 Use a list of favorites to resume state from a
8948                 previous select_files() call. This creates similar
8949                 DependencyArg instances to those that would have
8950                 been created by the original select_files() call.
8951                 This allows Package instances to be matched with
8952                 DependencyArg instances during graph creation.
8953                 """
8954                 root_config = self.roots[self.target_root]
8955                 getSetAtoms = root_config.setconfig.getSetAtoms
8956                 sets = root_config.sets
8957                 args = []
8958                 for x in favorites:
8959                         if not isinstance(x, basestring):
8960                                 continue
8961                         if x in ("system", "world"):
8962                                 x = SETPREFIX + x
8963                         if x.startswith(SETPREFIX):
8964                                 s = x[len(SETPREFIX):]
8965                                 if s not in sets:
8966                                         continue
8967                                 if s in self._sets:
8968                                         continue
8969                                 # Recursively expand sets so that containment tests in
8970                                 # self._get_parent_sets() properly match atoms in nested
8971                                 # sets (like if world contains system).
8972                                 expanded_set = InternalPackageSet(
8973                                         initial_atoms=getSetAtoms(s))
8974                                 self._sets[s] = expanded_set
8975                                 args.append(SetArg(arg=x, set=expanded_set,
8976                                         root_config=root_config))
8977                         else:
8978                                 if not portage.isvalidatom(x):
8979                                         continue
8980                                 args.append(AtomArg(arg=x, atom=x,
8981                                         root_config=root_config))
8982
8983                 self._set_args(args)
8984                 return args
8985
8986         class UnsatisfiedResumeDep(portage.exception.PortageException):
8987                 """
8988                 A dependency of a resume list is not installed. This
8989                 can occur when a required package is dropped from the
8990                 merge list via --skipfirst.
8991                 """
8992                 def __init__(self, depgraph, value):
8993                         portage.exception.PortageException.__init__(self, value)
8994                         self.depgraph = depgraph
8995
8996         class _internal_exception(portage.exception.PortageException):
8997                 def __init__(self, value=""):
8998                         portage.exception.PortageException.__init__(self, value)
8999
9000         class _unknown_internal_error(_internal_exception):
9001                 """
9002                 Used by the depgraph internally to terminate graph creation.
9003                 The specific reason for the failure should have been dumped
9004                 to stderr; unfortunately, the exact reason for the failure
9005                 may not be known.
9006                 """
9007
9008         class _serialize_tasks_retry(_internal_exception):
9009                 """
9010                 This is raised by the _serialize_tasks() method when it needs to
9011                 be called again for some reason. The only case that it's currently
9012                 used for is when neglected dependencies need to be added to the
9013                 graph in order to avoid making a potentially unsafe decision.
9014                 """
9015
9016         class _dep_check_composite_db(portage.dbapi):
9017                 """
9018                 A dbapi-like interface that is optimized for use in dep_check() calls.
9019                 This is built on top of the existing depgraph package selection logic.
9020                 Some packages that have been added to the graph may be masked from this
9021                 view in order to influence the atom preference selection that occurs
9022                 via dep_check().
9023                 """
9024                 def __init__(self, depgraph, root):
9025                         portage.dbapi.__init__(self)
9026                         self._depgraph = depgraph
9027                         self._root = root
9028                         self._match_cache = {}
9029                         self._cpv_pkg_map = {}
9030
9031                 def _clear_cache(self):
9032                         self._match_cache.clear()
9033                         self._cpv_pkg_map.clear()
9034
9035                 def match(self, atom):
9036                         ret = self._match_cache.get(atom)
9037                         if ret is not None:
9038                                 return ret[:]
9039                         orig_atom = atom
9040                         if "/" not in atom:
9041                                 atom = self._dep_expand(atom)
9042                         pkg, existing = self._depgraph._select_package(self._root, atom)
9043                         if not pkg:
9044                                 ret = []
9045                         else:
9046                                 # Return the highest available from select_package() as well as
9047                                 # any matching slots in the graph db.
9048                                 slots = set()
9049                                 slots.add(pkg.metadata["SLOT"])
9050                                 atom_cp = portage.dep_getkey(atom)
9051                                 if pkg.cp.startswith("virtual/"):
9052                                         # For new-style virtual lookahead that occurs inside
9053                                         # dep_check(), examine all slots. This is needed
9054                                         # so that newer slots will not unnecessarily be pulled in
9055                                         # when a satisfying lower slot is already installed. For
9056                                         # example, if virtual/jdk-1.4 is satisfied via kaffe then
9057                                         # there's no need to pull in a newer slot to satisfy a
9058                                         # virtual/jdk dependency.
9059                                         for db, pkg_type, built, installed, db_keys in \
9060                                                 self._depgraph._filtered_trees[self._root]["dbs"]:
9061                                                 for cpv in db.match(atom):
9062                                                         if portage.cpv_getkey(cpv) != pkg.cp:
9063                                                                 continue
9064                                                         slots.add(db.aux_get(cpv, ["SLOT"])[0])
9065                                 ret = []
9066                                 if self._visible(pkg):
9067                                         self._cpv_pkg_map[pkg.cpv] = pkg
9068                                         ret.append(pkg.cpv)
9069                                 slots.remove(pkg.metadata["SLOT"])
9070                                 while slots:
9071                                         slot_atom = "%s:%s" % (atom_cp, slots.pop())
9072                                         pkg, existing = self._depgraph._select_package(
9073                                                 self._root, slot_atom)
9074                                         if not pkg:
9075                                                 continue
9076                                         if not self._visible(pkg):
9077                                                 continue
9078                                         self._cpv_pkg_map[pkg.cpv] = pkg
9079                                         ret.append(pkg.cpv)
9080                                 if ret:
9081                                         self._cpv_sort_ascending(ret)
9082                         self._match_cache[orig_atom] = ret
9083                         return ret[:]
9084
9085                 def _visible(self, pkg):
9086                         if pkg.installed and "selective" not in self._depgraph.myparams:
9087                                 try:
9088                                         arg = self._depgraph._iter_atoms_for_pkg(pkg).next()
9089                                 except (StopIteration, portage.exception.InvalidDependString):
9090                                         arg = None
9091                                 if arg:
9092                                         return False
9093                         if pkg.installed:
9094                                 try:
9095                                         if not visible(
9096                                                 self._depgraph.pkgsettings[pkg.root], pkg):
9097                                                 return False
9098                                 except portage.exception.InvalidDependString:
9099                                         pass
9100                         in_graph = self._depgraph._slot_pkg_map[
9101                                 self._root].get(pkg.slot_atom)
9102                         if in_graph is None:
9103                                 # Mask choices for packages which are not the highest visible
9104                                 # version within their slot (since they usually trigger slot
9105                                 # conflicts).
9106                                 highest_visible, in_graph = self._depgraph._select_package(
9107                                         self._root, pkg.slot_atom)
9108                                 if pkg != highest_visible:
9109                                         return False
9110                         elif in_graph != pkg:
9111                                 # Mask choices for packages that would trigger a slot
9112                                 # conflict with a previously selected package.
9113                                 return False
9114                         return True
9115
9116                 def _dep_expand(self, atom):
9117                         """
9118                         This is only needed for old installed packages that may
9119                         contain atoms that are not fully qualified with a specific
9120                         category. Emulate the cpv_expand() function that's used by
9121                         dbapi.match() in cases like this. If there are multiple
9122                         matches, it's often due to a new-style virtual that has
9123                         been added, so try to filter those out to avoid raising
9124                         a ValueError.
9125                         """
9126                         root_config = self._depgraph.roots[self._root]
9127                         orig_atom = atom
9128                         expanded_atoms = self._depgraph._dep_expand(root_config, atom)
9129                         if len(expanded_atoms) > 1:
9130                                 non_virtual_atoms = []
9131                                 for x in expanded_atoms:
9132                                         if not portage.dep_getkey(x).startswith("virtual/"):
9133                                                 non_virtual_atoms.append(x)
9134                                 if len(non_virtual_atoms) == 1:
9135                                         expanded_atoms = non_virtual_atoms
9136                         if len(expanded_atoms) > 1:
9137                                 # compatible with portage.cpv_expand()
9138                                 raise portage.exception.AmbiguousPackageName(
9139                                         [portage.dep_getkey(x) for x in expanded_atoms])
9140                         if expanded_atoms:
9141                                 atom = expanded_atoms[0]
9142                         else:
9143                                 null_atom = insert_category_into_atom(atom, "null")
9144                                 null_cp = portage.dep_getkey(null_atom)
9145                                 cat, atom_pn = portage.catsplit(null_cp)
9146                                 virts_p = root_config.settings.get_virts_p().get(atom_pn)
9147                                 if virts_p:
9148                                         # Allow the resolver to choose which virtual.
9149                                         atom = insert_category_into_atom(atom, "virtual")
9150                                 else:
9151                                         atom = insert_category_into_atom(atom, "null")
9152                         return atom
9153
9154                 def aux_get(self, cpv, wants):
9155                         metadata = self._cpv_pkg_map[cpv].metadata
9156                         return [metadata.get(x, "") for x in wants]
9157
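# Illustrative usage sketch (not part of the original sources): the nested
# depgraph._dep_check_composite_db above behaves like a read-only dbapi, so
# given an already constructed depgraph ("mydepgraph" is a placeholder name)
# it could be queried roughly like this:
#
#   composite_db = depgraph._dep_check_composite_db(mydepgraph, "/")
#   cpv_list = composite_db.match("dev-lang/python")
#   if cpv_list:
#       slot, = composite_db.aux_get(cpv_list[-1], ["SLOT"])
#
# In practice the depgraph supplies such an instance to dep_check() itself;
# external callers are not expected to construct one directly.
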
9158 class RepoDisplay(object):
9159         def __init__(self, roots):
9160                 self._shown_repos = {}
9161                 self._unknown_repo = False
9162                 repo_paths = set()
9163                 for root_config in roots.itervalues():
9164                         portdir = root_config.settings.get("PORTDIR")
9165                         if portdir:
9166                                 repo_paths.add(portdir)
9167                         overlays = root_config.settings.get("PORTDIR_OVERLAY")
9168                         if overlays:
9169                                 repo_paths.update(overlays.split())
9170                 repo_paths = list(repo_paths)
9171                 self._repo_paths = repo_paths
9172                 self._repo_paths_real = [ os.path.realpath(repo_path) \
9173                         for repo_path in repo_paths ]
9174
9175                 # pre-allocate index for PORTDIR so that it always has index 0.
9176                 for root_config in roots.itervalues():
9177                         portdb = root_config.trees["porttree"].dbapi
9178                         portdir = portdb.porttree_root
9179                         if portdir:
9180                                 self.repoStr(portdir)
9181
9182         def repoStr(self, repo_path_real):
9183                 real_index = -1
9184                 if repo_path_real in self._repo_paths_real:
9185                         real_index = self._repo_paths_real.index(repo_path_real)
9186                 if real_index == -1:
9187                         s = "?"
9188                         self._unknown_repo = True
9189                 else:
9190                         shown_repos = self._shown_repos
9191                         repo_paths = self._repo_paths
9192                         repo_path = repo_paths[real_index]
9193                         index = shown_repos.get(repo_path)
9194                         if index is None:
9195                                 index = len(shown_repos)
9196                                 shown_repos[repo_path] = index
9197                         s = str(index)
9198                 return s
9199
9200         def __str__(self):
9201                 output = []
9202                 shown_repos = self._shown_repos
9203                 unknown_repo = self._unknown_repo
9204                 if shown_repos or self._unknown_repo:
9205                         output.append("Portage tree and overlays:\n")
9206                 show_repo_paths = list(shown_repos)
9207                 for repo_path, repo_index in shown_repos.iteritems():
9208                         show_repo_paths[repo_index] = repo_path
9209                 if show_repo_paths:
9210                         for index, repo_path in enumerate(show_repo_paths):
9211                                 output.append(" "+teal("["+str(index)+"]")+" %s\n" % repo_path)
9212                 if unknown_repo:
9213                         output.append(" "+teal("[?]") + \
9214                                 " indicates that the source repository could not be determined\n")
9215                 return "".join(output)
9216
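# Illustrative usage sketch (not part of the original sources): RepoDisplay
# collects repository paths from each root's PORTDIR/PORTDIR_OVERLAY settings
# and hands out short numeric labels for the merge-list display. Assuming
# "root_configs" is the usual {root: RootConfig} mapping (placeholder name):
#
#   repo_display = RepoDisplay(root_configs)
#   label = repo_display.repoStr(os.path.realpath("/usr/portage"))
#   print "[%s]" % label    # "[0]" for PORTDIR, "?" if the path is unknown
#   print repo_display      # legend mapping each label back to its path
#
# When any path was unknown, the legend also notes that the source repository
# could not be determined.
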
9217 class PackageCounters(object):
9218
9219         def __init__(self):
9220                 self.upgrades   = 0
9221                 self.downgrades = 0
9222                 self.new        = 0
9223                 self.newslot    = 0
9224                 self.reinst     = 0
9225                 self.uninst     = 0
9226                 self.blocks     = 0
9227                 self.blocks_satisfied         = 0
9228                 self.totalsize  = 0
9229                 self.restrict_fetch           = 0
9230                 self.restrict_fetch_satisfied = 0
9231                 self.interactive              = 0
9232
9233         def __str__(self):
9234                 total_installs = self.upgrades + self.downgrades + self.newslot + self.new + self.reinst
9235                 myoutput = []
9236                 details = []
9237                 myoutput.append("Total: %s package" % total_installs)
9238                 if total_installs != 1:
9239                         myoutput.append("s")
9240                 if total_installs != 0:
9241                         myoutput.append(" (")
9242                 if self.upgrades > 0:
9243                         details.append("%s upgrade" % self.upgrades)
9244                         if self.upgrades > 1:
9245                                 details[-1] += "s"
9246                 if self.downgrades > 0:
9247                         details.append("%s downgrade" % self.downgrades)
9248                         if self.downgrades > 1:
9249                                 details[-1] += "s"
9250                 if self.new > 0:
9251                         details.append("%s new" % self.new)
9252                 if self.newslot > 0:
9253                         details.append("%s in new slot" % self.newslot)
9254                         if self.newslot > 1:
9255                                 details[-1] += "s"
9256                 if self.reinst > 0:
9257                         details.append("%s reinstall" % self.reinst)
9258                         if self.reinst > 1:
9259                                 details[-1] += "s"
9260                 if self.uninst > 0:
9261                         details.append("%s uninstall" % self.uninst)
9262                         if self.uninst > 1:
9263                                 details[-1] += "s"
9264                 if self.interactive > 0:
9265                         details.append("%s %s" % (self.interactive,
9266                                 colorize("WARN", "interactive")))
9267                 myoutput.append(", ".join(details))
9268                 if total_installs != 0:
9269                         myoutput.append(")")
9270                 myoutput.append(", Size of downloads: %s" % format_size(self.totalsize))
9271                 if self.restrict_fetch:
9272                         myoutput.append("\nFetch Restriction: %s package" % \
9273                                 self.restrict_fetch)
9274                         if self.restrict_fetch > 1:
9275                                 myoutput.append("s")
9276                 if self.restrict_fetch_satisfied < self.restrict_fetch:
9277                         myoutput.append(bad(" (%s unsatisfied)") % \
9278                                 (self.restrict_fetch - self.restrict_fetch_satisfied))
9279                 if self.blocks > 0:
9280                         myoutput.append("\nConflict: %s block" % \
9281                                 self.blocks)
9282                         if self.blocks > 1:
9283                                 myoutput.append("s")
9284                         if self.blocks_satisfied < self.blocks:
9285                                 myoutput.append(bad(" (%s unsatisfied)") % \
9286                                         (self.blocks - self.blocks_satisfied))
9287                 return "".join(myoutput)
9288
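# Illustrative usage sketch (not part of the original sources): PackageCounters
# is a plain accumulator; display code increments its fields while walking the
# merge list and prints the summary via str(). A minimal, hypothetical example:
#
#   counters = PackageCounters()
#   counters.upgrades += 1
#   counters.new += 2
#   counters.totalsize += 1024 * 1024
#   print counters
#
# The resulting line would read along the lines of
# "Total: 3 packages (1 upgrade, 2 new), Size of downloads: ...", with the
# exact size text coming from format_size().
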
9289 class UseFlagDisplay(object):
9290
9291         __slots__ = ('name', 'enabled', 'forced')
9292
9293         def __init__(self, name, enabled, forced):
9294                 self.name = name
9295                 self.enabled = enabled
9296                 self.forced = forced
9297
9298         def __str__(self):
9299                 s = self.name
9300                 if self.enabled:
9301                         s = red(s)
9302                 else:
9303                         s = '-' + s
9304                         s = blue(s)
9305                 if self.forced:
9306                         s = '(%s)' % s
9307                 return s
9308
9309         def _cmp_combined(a, b):
9310                 """
9311                 Sort by name, combining enabled and disabled flags.
9312                 """
9313                 return (a.name > b.name) - (a.name < b.name)
9314
9315         sort_combined = cmp_sort_key(_cmp_combined)
9316         del _cmp_combined
9317
9318         def _cmp_separated(a, b):
9319                 """
9320                 Sort by name, separating enabled flags from disabled flags.
9321                 """
9322                 enabled_diff = b.enabled - a.enabled
9323                 if enabled_diff:
9324                         return enabled_diff
9325                 return (a.name > b.name) - (a.name < b.name)
9326
9327         sort_separated = cmp_sort_key(_cmp_separated)
9328         del _cmp_separated
9329
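# Illustrative usage sketch (not part of the original sources): UseFlagDisplay
# pairs a flag name with its enabled/forced state and renders it with the
# usual emerge coloring (red when enabled, blue with a leading "-" when
# disabled, parentheses when forced). The two sort keys serve the two display
# modes:
#
#   flags = [UseFlagDisplay("ssl", True, False),
#            UseFlagDisplay("X", False, True)]
#   flags.sort(key=UseFlagDisplay.sort_separated)   # enabled flags first
#   print " ".join(str(f) for f in flags)
#
# sort_combined instead sorts purely by name, interleaving enabled and
# disabled flags.
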
9330 class PollSelectAdapter(PollConstants):
9331
9332         """
9333         Use select to emulate a poll object, for
9334         systems that don't support poll().
9335         """
9336
9337         def __init__(self):
9338                 self._registered = {}
9339                 self._select_args = [[], [], []]
9340
9341         def register(self, fd, *args):
9342                 """
9343                 Only POLLIN is currently supported!
9344                 """
9345                 if len(args) > 1:
9346                         raise TypeError(
9347                                 "register expected at most 2 arguments, got " + \
9348                                 repr(1 + len(args)))
9349
9350                 eventmask = PollConstants.POLLIN | \
9351                         PollConstants.POLLPRI | PollConstants.POLLOUT
9352                 if args:
9353                         eventmask = args[0]
9354
9355                 self._registered[fd] = eventmask
9356                 self._select_args = None
9357
9358         def unregister(self, fd):
9359                 self._select_args = None
9360                 del self._registered[fd]
9361
9362         def poll(self, *args):
9363                 if len(args) > 1:
9364                         raise TypeError(
9365                                 "poll expected at most 2 arguments, got " + \
9366                                 repr(1 + len(args)))
9367
9368                 timeout = None
9369                 if args:
9370                         timeout = args[0]
9371
9372                 select_args = self._select_args
9373                 if select_args is None:
9374                         select_args = [self._registered.keys(), [], []]
9375
9376                 if timeout is not None:
9377                         select_args = select_args[:]
9378                         # Translate poll() timeout args to select() timeout args:
9379                         #
9380                         #          | units        | value(s) for indefinite block
9381                         # ---------|--------------|------------------------------
9382                         #   poll   | milliseconds | omitted, negative, or None
9383                         # ---------|--------------|------------------------------
9384                         #   select | seconds      | omitted
9385                         # ---------|--------------|------------------------------
9386
9387                         if timeout is not None and timeout < 0:
9388                                 timeout = None
9389                         if timeout is not None:
9390                                 select_args.append(timeout / 1000.0)
9391
9392                 select_events = select.select(*select_args)
9393                 poll_events = []
9394                 for fd in select_events[0]:
9395                         poll_events.append((fd, PollConstants.POLLIN))
9396                 return poll_events
9397
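# Illustrative usage sketch (not part of the original sources):
# PollSelectAdapter mirrors the subset of the select.poll() interface used by
# the schedulers below (register/unregister plus POLLIN events). For example,
# with a plain pipe:
#
#   import os
#   pr, pw = os.pipe()
#   poller = PollSelectAdapter()
#   poller.register(pr, PollConstants.POLLIN)
#   os.write(pw, "x")
#   events = poller.poll(1000)    # timeout in milliseconds, as with poll()
#   # events == [(pr, PollConstants.POLLIN)]
#
# Only POLLIN is ever reported, as noted in register() above.
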
9398 class SequentialTaskQueue(SlotObject):
9399
9400         __slots__ = ("max_jobs", "running_tasks") + \
9401                 ("_dirty", "_scheduling", "_task_queue")
9402
9403         def __init__(self, **kwargs):
9404                 SlotObject.__init__(self, **kwargs)
9405                 self._task_queue = deque()
9406                 self.running_tasks = set()
9407                 if self.max_jobs is None:
9408                         self.max_jobs = 1
9409                 self._dirty = True
9410
9411         def add(self, task):
9412                 self._task_queue.append(task)
9413                 self._dirty = True
9414
9415         def addFront(self, task):
9416                 self._task_queue.appendleft(task)
9417                 self._dirty = True
9418
9419         def schedule(self):
9420
9421                 if not self._dirty:
9422                         return False
9423
9424                 if not self:
9425                         return False
9426
9427                 if self._scheduling:
9428                         # Ignore any recursive schedule() calls triggered via
9429                         # self._task_exit().
9430                         return False
9431
9432                 self._scheduling = True
9433
9434                 task_queue = self._task_queue
9435                 running_tasks = self.running_tasks
9436                 max_jobs = self.max_jobs
9437                 state_changed = False
9438
9439                 while task_queue and \
9440                         (max_jobs is True or len(running_tasks) < max_jobs):
9441                         task = task_queue.popleft()
9442                         cancelled = getattr(task, "cancelled", None)
9443                         if not cancelled:
9444                                 running_tasks.add(task)
9445                                 task.addExitListener(self._task_exit)
9446                                 task.start()
9447                         state_changed = True
9448
9449                 self._dirty = False
9450                 self._scheduling = False
9451
9452                 return state_changed
9453
9454         def _task_exit(self, task):
9455                 """
9456                 Since we can always rely on exit listeners being called, the set of
9457                 running tasks is always pruned automatically and there is never any need
9458                 to actively prune it.
9459                 """
9460                 self.running_tasks.remove(task)
9461                 if self._task_queue:
9462                         self._dirty = True
9463
9464         def clear(self):
9465                 self._task_queue.clear()
9466                 running_tasks = self.running_tasks
9467                 while running_tasks:
9468                         task = running_tasks.pop()
9469                         task.removeExitListener(self._task_exit)
9470                         task.cancel()
9471                 self._dirty = False
9472
9473         def __nonzero__(self):
9474                 return bool(self._task_queue or self.running_tasks)
9475
9476         def __len__(self):
9477                 return len(self._task_queue) + len(self.running_tasks)
9478
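# Illustrative usage sketch (not part of the original sources):
# SequentialTaskQueue expects task objects providing start(), cancel(),
# addExitListener() and removeExitListener() (the asynchronous task interface
# used throughout this module):
#
#   queue = SequentialTaskQueue(max_jobs=2)
#   for task in tasks:    # "tasks" is a placeholder iterable
#       queue.add(task)
#   queue.schedule()      # starts up to max_jobs tasks
#
# In practice the queue is handed to a QueueScheduler (below), whose poll loop
# keeps calling schedule() as running tasks exit and mark the queue dirty.
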
9479 _can_poll_device = None
9480
9481 def can_poll_device():
9482         """
9483         Test if it's possible to use poll() on a device such as a pty. This
9484         is known to fail on Darwin.
9485         @rtype: bool
9486         @returns: True if poll() on a device succeeds, False otherwise.
9487         """
9488
9489         global _can_poll_device
9490         if _can_poll_device is not None:
9491                 return _can_poll_device
9492
9493         if not hasattr(select, "poll"):
9494                 _can_poll_device = False
9495                 return _can_poll_device
9496
9497         try:
9498                 dev_null = open('/dev/null', 'rb')
9499         except IOError:
9500                 _can_poll_device = False
9501                 return _can_poll_device
9502
9503         p = select.poll()
9504         p.register(dev_null.fileno(), PollConstants.POLLIN)
9505
9506         invalid_request = False
9507         for f, event in p.poll():
9508                 if event & PollConstants.POLLNVAL:
9509                         invalid_request = True
9510                         break
9511         dev_null.close()
9512
9513         _can_poll_device = not invalid_request
9514         return _can_poll_device
9515
9516 def create_poll_instance():
9517         """
9518         Create an instance of select.poll, or an instance of
9519         PollSelectAdapter if there is no poll() implementation or
9520         it is broken somehow.
9521         """
9522         if can_poll_device():
9523                 return select.poll()
9524         return PollSelectAdapter()
9525
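# Illustrative usage sketch (not part of the original sources): callers do not
# need to care whether poll() works on devices for this platform; they simply
# use the factory:
#
#   poller = create_poll_instance()
#   poller.register(fd, PollConstants.POLLIN)   # "fd" is a placeholder
#   for f, event in poller.poll(0):
#       handle_event(f, event)                  # hypothetical handler
#
# On platforms where poll() fails on a pty (such as Darwin), this
# transparently falls back to the PollSelectAdapter defined above.
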
9526 getloadavg = getattr(os, "getloadavg", None)
9527 if getloadavg is None:
9528         def getloadavg():
9529                 """
9530                 Uses /proc/loadavg to emulate os.getloadavg().
9531                 Raises OSError if the load average was unobtainable.
9532                 """
9533                 try:
9534                         loadavg_str = open('/proc/loadavg').readline()
9535                 except IOError:
9536                         # getloadavg() is only supposed to raise OSError, so convert
9537                         raise OSError('unknown')
9538                 loadavg_split = loadavg_str.split()
9539                 if len(loadavg_split) < 3:
9540                         raise OSError('unknown')
9541                 loadavg_floats = []
9542                 for i in xrange(3):
9543                         try:
9544                                 loadavg_floats.append(float(loadavg_split[i]))
9545                         except ValueError:
9546                                 raise OSError('unknown')
9547                 return tuple(loadavg_floats)
9548
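# Illustrative usage sketch (not part of the original sources): whichever
# implementation is in effect, getloadavg() keeps the os.getloadavg()
# contract, so callers guard it uniformly:
#
#   try:
#       avg1, avg5, avg15 = getloadavg()
#   except OSError:
#       avg1 = avg5 = avg15 = None    # load average unobtainable
#
# This is how _can_add_job() and _load_avg_str() below consume it.
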
9549 class PollScheduler(object):
9550
9551         class _sched_iface_class(SlotObject):
9552                 __slots__ = ("register", "schedule", "unregister")
9553
9554         def __init__(self):
9555                 self._max_jobs = 1
9556                 self._max_load = None
9557                 self._jobs = 0
9558                 self._poll_event_queue = []
9559                 self._poll_event_handlers = {}
9560                 self._poll_event_handler_ids = {}
9561                 # Increment id for each new handler.
9562                 self._event_handler_id = 0
9563                 self._poll_obj = create_poll_instance()
9564                 self._scheduling = False
9565
9566         def _schedule(self):
9567                 """
9568                 Calls _schedule_tasks() and automatically returns early from
9569                 any recursive calls to this method that the _schedule_tasks()
9570                 call might trigger. This makes _schedule() safe to call from
9571                 inside exit listeners.
9572                 """
9573                 if self._scheduling:
9574                         return False
9575                 self._scheduling = True
9576                 try:
9577                         return self._schedule_tasks()
9578                 finally:
9579                         self._scheduling = False
9580
9581         def _running_job_count(self):
9582                 return self._jobs
9583
9584         def _can_add_job(self):
9585                 max_jobs = self._max_jobs
9586                 max_load = self._max_load
9587
9588                 if self._max_jobs is not True and \
9589                         self._running_job_count() >= self._max_jobs:
9590                         return False
9591
9592                 if max_load is not None and \
9593                         (max_jobs is True or max_jobs > 1) and \
9594                         self._running_job_count() >= 1:
9595                         try:
9596                                 avg1, avg5, avg15 = getloadavg()
9597                         except OSError:
9598                                 return False
9599
9600                         if avg1 >= max_load:
9601                                 return False
9602
9603                 return True
9604
9605         def _poll(self, timeout=None):
9606                 """
9607                 All poll() calls pass through here. The poll events
9608                 are added directly to self._poll_event_queue.
9609                 In order to avoid endless blocking, this raises
9610                 StopIteration if timeout is None and there are
9611                 no file descriptors to poll.
9612                 """
9613                 if not self._poll_event_handlers:
9614                         self._schedule()
9615                         if timeout is None and \
9616                                 not self._poll_event_handlers:
9617                                 raise StopIteration(
9618                                         "timeout is None and there are no poll() event handlers")
9619
9620                 # The following error is known to occur with Linux kernel versions
9621                 # less than 2.6.24:
9622                 #
9623                 #   select.error: (4, 'Interrupted system call')
9624                 #
9625                 # This error has been observed after a SIGSTOP, followed by SIGCONT.
9626                 # Treat it similar to EAGAIN if timeout is None, otherwise just return
9627                 # without any events.
9628                 while True:
9629                         try:
9630                                 self._poll_event_queue.extend(self._poll_obj.poll(timeout))
9631                                 break
9632                         except select.error, e:
9633                                 writemsg_level("\n!!! select error: %s\n" % (e,),
9634                                         level=logging.ERROR, noiselevel=-1)
9635                                 del e
9636                                 if timeout is not None:
9637                                         break
9638
9639         def _next_poll_event(self, timeout=None):
9640                 """
9641                 Since the _schedule_wait() loop is called by event
9642                 handlers from _poll_loop(), maintain a central event
9643                 queue for both of them to share events from a single
9644                 poll() call. In order to avoid endless blocking, this
9645                 raises StopIteration if timeout is None and there are
9646                 no file descriptors to poll.
9647                 """
9648                 if not self._poll_event_queue:
9649                         self._poll(timeout)
9650                 return self._poll_event_queue.pop()
9651
9652         def _poll_loop(self):
9653
9654                 event_handlers = self._poll_event_handlers
9655                 event_handled = False
9656
9657                 try:
9658                         while event_handlers:
9659                                 f, event = self._next_poll_event()
9660                                 handler, reg_id = event_handlers[f]
9661                                 handler(f, event)
9662                                 event_handled = True
9663                 except StopIteration:
9664                         event_handled = True
9665
9666                 if not event_handled:
9667                         raise AssertionError("tight loop")
9668
9669         def _schedule_yield(self):
9670                 """
9671                 Schedule for a short period of time chosen by the scheduler based
9672                 on internal state. Synchronous tasks should call this periodically
9673                 in order to allow the scheduler to service pending poll events. The
9674                 scheduler will call poll() exactly once, without blocking, and any
9675                 resulting poll events will be serviced.
9676                 """
9677                 event_handlers = self._poll_event_handlers
9678                 events_handled = 0
9679
9680                 if not event_handlers:
9681                         return bool(events_handled)
9682
9683                 if not self._poll_event_queue:
9684                         self._poll(0)
9685
9686                 try:
9687                         while event_handlers and self._poll_event_queue:
9688                                 f, event = self._next_poll_event()
9689                                 handler, reg_id = event_handlers[f]
9690                                 handler(f, event)
9691                                 events_handled += 1
9692                 except StopIteration:
9693                         events_handled += 1
9694
9695                 return bool(events_handled)
9696
9697         def _register(self, f, eventmask, handler):
9698                 """
9699                 @rtype: Integer
9700                 @return: A unique registration id, for use in schedule() or
9701                         unregister() calls.
9702                 """
9703                 if f in self._poll_event_handlers:
9704                         raise AssertionError("fd %d is already registered" % f)
9705                 self._event_handler_id += 1
9706                 reg_id = self._event_handler_id
9707                 self._poll_event_handler_ids[reg_id] = f
9708                 self._poll_event_handlers[f] = (handler, reg_id)
9709                 self._poll_obj.register(f, eventmask)
9710                 return reg_id
9711
9712         def _unregister(self, reg_id):
9713                 f = self._poll_event_handler_ids[reg_id]
9714                 self._poll_obj.unregister(f)
9715                 del self._poll_event_handlers[f]
9716                 del self._poll_event_handler_ids[reg_id]
9717
9718         def _schedule_wait(self, wait_ids):
9719                 """
9720                 Schedule until the given wait_ids are no longer registered
9721                 for poll() events.
9722                 @type wait_ids: int or collection of ints
9723                 @param wait_ids: one or more registration ids to wait for
9724                 """
9725                 event_handlers = self._poll_event_handlers
9726                 handler_ids = self._poll_event_handler_ids
9727                 event_handled = False
9728
9729                 if isinstance(wait_ids, int):
9730                         wait_ids = frozenset([wait_ids])
9731
9732                 try:
9733                         while wait_ids.intersection(handler_ids):
9734                                 f, event = self._next_poll_event()
9735                                 handler, reg_id = event_handlers[f]
9736                                 handler(f, event)
9737                                 event_handled = True
9738                 except StopIteration:
9739                         event_handled = True
9740
9741                 return event_handled
9742
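# Illustrative sketch (not part of the original sources): PollScheduler is an
# abstract base; a concrete subclass supplies _schedule_tasks() and reports
# its job count via _running_job_count(), as QueueScheduler and Scheduler
# below do. A minimal, hypothetical subclass:
#
#   class _NullScheduler(PollScheduler):
#       def _schedule_tasks(self):
#           # Return True if there may be remaining tasks to schedule;
#           # this trivial example never has any.
#           return False
#
# The _max_jobs/_max_load attributes feed _can_add_job(), which is how the
# --jobs and --load-average limits are enforced.
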
9743 class QueueScheduler(PollScheduler):
9744
9745         """
9746         Add instances of SequentialTaskQueue and then call run(). The
9747         run() method returns when no tasks remain.
9748         """
9749
9750         def __init__(self, max_jobs=None, max_load=None):
9751                 PollScheduler.__init__(self)
9752
9753                 if max_jobs is None:
9754                         max_jobs = 1
9755
9756                 self._max_jobs = max_jobs
9757                 self._max_load = max_load
9758                 self.sched_iface = self._sched_iface_class(
9759                         register=self._register,
9760                         schedule=self._schedule_wait,
9761                         unregister=self._unregister)
9762
9763                 self._queues = []
9764                 self._schedule_listeners = []
9765
9766         def add(self, q):
9767                 self._queues.append(q)
9768
9769         def remove(self, q):
9770                 self._queues.remove(q)
9771
9772         def run(self):
9773
9774                 while self._schedule():
9775                         self._poll_loop()
9776
9777                 while self._running_job_count():
9778                         self._poll_loop()
9779
9780         def _schedule_tasks(self):
9781                 """
9782                 @rtype: bool
9783                 @returns: True if there may be remaining tasks to schedule,
9784                         False otherwise.
9785                 """
9786                 while self._can_add_job():
9787                         n = self._max_jobs - self._running_job_count()
9788                         if n < 1:
9789                                 break
9790
9791                         if not self._start_next_job(n):
9792                                 return False
9793
9794                 for q in self._queues:
9795                         if q:
9796                                 return True
9797                 return False
9798
9799         def _running_job_count(self):
9800                 job_count = 0
9801                 for q in self._queues:
9802                         job_count += len(q.running_tasks)
9803                 self._jobs = job_count
9804                 return job_count
9805
9806         def _start_next_job(self, n=1):
9807                 started_count = 0
9808                 for q in self._queues:
9809                         initial_job_count = len(q.running_tasks)
9810                         q.schedule()
9811                         final_job_count = len(q.running_tasks)
9812                         if final_job_count > initial_job_count:
9813                                 started_count += (final_job_count - initial_job_count)
9814                         if started_count >= n:
9815                                 break
9816                 return started_count
9817
9818 class TaskScheduler(object):
9819
9820         """
9821         A simple way to handle scheduling of AsynchronousTask instances. Simply
9822         add tasks and call run(). The run() method returns when no tasks remain.
9823         """
9824
9825         def __init__(self, max_jobs=None, max_load=None):
9826                 self._queue = SequentialTaskQueue(max_jobs=max_jobs)
9827                 self._scheduler = QueueScheduler(
9828                         max_jobs=max_jobs, max_load=max_load)
9829                 self.sched_iface = self._scheduler.sched_iface
9830                 self.run = self._scheduler.run
9831                 self._scheduler.add(self._queue)
9832
9833         def add(self, task):
9834                 self._queue.add(task)
9835
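# Illustrative usage sketch (not part of the original sources): TaskScheduler
# is the convenience wrapper for running a batch of asynchronous tasks with
# bounded parallelism:
#
#   task_scheduler = TaskScheduler(max_jobs=2, max_load=3.0)
#   for task in tasks:        # "tasks" is a placeholder iterable
#       task_scheduler.add(task)
#   task_scheduler.run()      # returns once every task has exited
#
# Internally this is just a SequentialTaskQueue driven by a QueueScheduler,
# so the same start()/addExitListener() task contract applies.
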
9836 class JobStatusDisplay(object):
9837
9838         _bound_properties = ("curval", "failed", "running")
9839         _jobs_column_width = 48
9840
9841         # Don't update the display unless at least this much
9842         # time has passed, in units of seconds.
9843         _min_display_latency = 2
9844
9845         _default_term_codes = {
9846                 'cr'  : '\r',
9847                 'el'  : '\x1b[K',
9848                 'nel' : '\n',
9849         }
9850
9851         _termcap_name_map = {
9852                 'carriage_return' : 'cr',
9853                 'clr_eol'         : 'el',
9854                 'newline'         : 'nel',
9855         }
9856
9857         def __init__(self, out=sys.stdout, quiet=False):
9858                 object.__setattr__(self, "out", out)
9859                 object.__setattr__(self, "quiet", quiet)
9860                 object.__setattr__(self, "maxval", 0)
9861                 object.__setattr__(self, "merges", 0)
9862                 object.__setattr__(self, "_changed", False)
9863                 object.__setattr__(self, "_displayed", False)
9864                 object.__setattr__(self, "_last_display_time", 0)
9865                 object.__setattr__(self, "width", 80)
9866                 self.reset()
9867
9868                 isatty = hasattr(out, "isatty") and out.isatty()
9869                 object.__setattr__(self, "_isatty", isatty)
9870                 if not isatty or not self._init_term():
9871                         term_codes = {}
9872                         for k, capname in self._termcap_name_map.iteritems():
9873                                 term_codes[k] = self._default_term_codes[capname]
9874                         object.__setattr__(self, "_term_codes", term_codes)
9875                 encoding = sys.getdefaultencoding()
9876                 for k, v in self._term_codes.items():
9877                         if not isinstance(v, basestring):
9878                                 self._term_codes[k] = v.decode(encoding, 'replace')
9879
9880         def _init_term(self):
9881                 """
9882                 Initialize term control codes.
9883                 @rtype: bool
9884                 @returns: True if term codes were successfully initialized,
9885                         False otherwise.
9886                 """
9887
9888                 term_type = os.environ.get("TERM", "vt100")
9889                 tigetstr = None
9890
9891                 try:
9892                         import curses
9893                         try:
9894                                 curses.setupterm(term_type, self.out.fileno())
9895                                 tigetstr = curses.tigetstr
9896                         except curses.error:
9897                                 pass
9898                 except ImportError:
9899                         pass
9900
9901                 if tigetstr is None:
9902                         return False
9903
9904                 term_codes = {}
9905                 for k, capname in self._termcap_name_map.iteritems():
9906                         code = tigetstr(capname)
9907                         if code is None:
9908                                 code = self._default_term_codes[capname]
9909                         term_codes[k] = code
9910                 object.__setattr__(self, "_term_codes", term_codes)
9911                 return True
9912
9913         def _format_msg(self, msg):
9914                 return ">>> %s" % msg
9915
9916         def _erase(self):
9917                 self.out.write(
9918                         self._term_codes['carriage_return'] + \
9919                         self._term_codes['clr_eol'])
9920                 self.out.flush()
9921                 self._displayed = False
9922
9923         def _display(self, line):
9924                 self.out.write(line)
9925                 self.out.flush()
9926                 self._displayed = True
9927
9928         def _update(self, msg):
9929
9930                 out = self.out
9931                 if not self._isatty:
9932                         out.write(self._format_msg(msg) + self._term_codes['newline'])
9933                         self.out.flush()
9934                         self._displayed = True
9935                         return
9936
9937                 if self._displayed:
9938                         self._erase()
9939
9940                 self._display(self._format_msg(msg))
9941
9942         def displayMessage(self, msg):
9943
9944                 was_displayed = self._displayed
9945
9946                 if self._isatty and self._displayed:
9947                         self._erase()
9948
9949                 self.out.write(self._format_msg(msg) + self._term_codes['newline'])
9950                 self.out.flush()
9951                 self._displayed = False
9952
9953                 if was_displayed:
9954                         self._changed = True
9955                         self.display()
9956
9957         def reset(self):
9958                 self.maxval = 0
9959                 self.merges = 0
9960                 for name in self._bound_properties:
9961                         object.__setattr__(self, name, 0)
9962
9963                 if self._displayed:
9964                         self.out.write(self._term_codes['newline'])
9965                         self.out.flush()
9966                         self._displayed = False
9967
9968         def __setattr__(self, name, value):
9969                 old_value = getattr(self, name)
9970                 if value == old_value:
9971                         return
9972                 object.__setattr__(self, name, value)
9973                 if name in self._bound_properties:
9974                         self._property_change(name, old_value, value)
9975
9976         def _property_change(self, name, old_value, new_value):
9977                 self._changed = True
9978                 self.display()
9979
9980         def _load_avg_str(self):
9981                 try:
9982                         avg = getloadavg()
9983                 except OSError:
9984                         return 'unknown'
9985
9986                 max_avg = max(avg)
9987
9988                 if max_avg < 10:
9989                         digits = 2
9990                 elif max_avg < 100:
9991                         digits = 1
9992                 else:
9993                         digits = 0
9994
9995                 return ", ".join(("%%.%df" % digits ) % x for x in avg)
9996
9997         def display(self):
9998                 """
9999                 Display status on stdout, but only if something has
10000                 changed since the last call.
10001                 """
10002
10003                 if self.quiet:
10004                         return
10005
10006                 current_time = time.time()
10007                 time_delta = current_time - self._last_display_time
10008                 if self._displayed and \
10009                         not self._changed:
10010                         if not self._isatty:
10011                                 return
10012                         if time_delta < self._min_display_latency:
10013                                 return
10014
10015                 self._last_display_time = current_time
10016                 self._changed = False
10017                 self._display_status()
10018
10019         def _display_status(self):
10020                 # Don't use len(self._completed_tasks) here since that also
10021                 # can include uninstall tasks.
10022                 curval_str = str(self.curval)
10023                 maxval_str = str(self.maxval)
10024                 running_str = str(self.running)
10025                 failed_str = str(self.failed)
10026                 load_avg_str = self._load_avg_str()
10027
10028                 color_output = StringIO()
10029                 plain_output = StringIO()
10030                 style_file = portage.output.ConsoleStyleFile(color_output)
10031                 style_file.write_listener = plain_output
10032                 style_writer = portage.output.StyleWriter(file=style_file, maxcol=9999)
10033                 style_writer.style_listener = style_file.new_styles
10034                 f = formatter.AbstractFormatter(style_writer)
10035
10036                 number_style = "INFORM"
10037                 f.add_literal_data("Jobs: ")
10038                 f.push_style(number_style)
10039                 f.add_literal_data(curval_str)
10040                 f.pop_style()
10041                 f.add_literal_data(" of ")
10042                 f.push_style(number_style)
10043                 f.add_literal_data(maxval_str)
10044                 f.pop_style()
10045                 f.add_literal_data(" complete")
10046
10047                 if self.running:
10048                         f.add_literal_data(", ")
10049                         f.push_style(number_style)
10050                         f.add_literal_data(running_str)
10051                         f.pop_style()
10052                         f.add_literal_data(" running")
10053
10054                 if self.failed:
10055                         f.add_literal_data(", ")
10056                         f.push_style(number_style)
10057                         f.add_literal_data(failed_str)
10058                         f.pop_style()
10059                         f.add_literal_data(" failed")
10060
10061                 padding = self._jobs_column_width - len(plain_output.getvalue())
10062                 if padding > 0:
10063                         f.add_literal_data(padding * " ")
10064
10065                 f.add_literal_data("Load avg: ")
10066                 f.add_literal_data(load_avg_str)
10067
10068                 # Truncate to fit width, to avoid making the terminal scroll if the
10069                 # line overflows (happens when the load average is large).
10070                 plain_output = plain_output.getvalue()
10071                 if self._isatty and len(plain_output) > self.width:
10072                         # Use plain_output here since it's easier to truncate
10073                         # properly than the color output which contains console
10074                         # color codes.
10075                         self._update(plain_output[:self.width])
10076                 else:
10077                         self._update(color_output.getvalue())
10078
10079                 xtermTitle(" ".join(plain_output.split()))
10080
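# Illustrative usage sketch (not part of the original sources):
# JobStatusDisplay keeps a single status line up to date on a tty (and
# degrades to plain newline-terminated output otherwise). Assigning to the
# bound properties triggers a redraw:
#
#   status = JobStatusDisplay()
#   status.maxval = 10     # total number of jobs
#   status.curval = 3      # completed so far; triggers display()
#   status.running = 2
#   status.displayMessage("completed app-misc/foo-1.0")   # hypothetical atom
#   status.reset()         # clear counters and drop the status line
#
# The Scheduler below owns one of these and updates it from its event
# handlers.
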
10081 class ProgressHandler(object):
10082         def __init__(self):
10083                 self.curval = 0
10084                 self.maxval = 0
10085                 self._last_update = 0
10086                 self.min_latency = 0.2
10087
10088         def onProgress(self, maxval, curval):
10089                 self.maxval = maxval
10090                 self.curval = curval
10091                 cur_time = time.time()
10092                 if cur_time - self._last_update >= self.min_latency:
10093                         self._last_update = cur_time
10094                         self.display()
10095
10096         def display(self):
10097                 raise NotImplementedError(self)
10098
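# Illustrative sketch (not part of the original sources): ProgressHandler is a
# small rate-limited callback helper; subclasses only implement display(). A
# hypothetical handler:
#
#   class _SimpleProgress(ProgressHandler):
#       def display(self):
#           sys.stderr.write("\r%d of %d" % (self.curval, self.maxval))
#
#   progress = _SimpleProgress()
#   progress.onProgress(100, 42)   # redraws at most once per min_latency
#
# Callers pass progress.onProgress around wherever an onProgress callback is
# expected.
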
10099 class Scheduler(PollScheduler):
10100
10101         _opts_ignore_blockers = \
10102                 frozenset(["--buildpkgonly",
10103                 "--fetchonly", "--fetch-all-uri",
10104                 "--nodeps", "--pretend"])
10105
10106         _opts_no_background = \
10107                 frozenset(["--pretend",
10108                 "--fetchonly", "--fetch-all-uri"])
10109
10110         _opts_no_restart = frozenset(["--buildpkgonly",
10111                 "--fetchonly", "--fetch-all-uri", "--pretend"])
10112
10113         _bad_resume_opts = set(["--ask", "--changelog",
10114                 "--resume", "--skipfirst"])
10115
10116         _fetch_log = "/var/log/emerge-fetch.log"
10117
10118         class _iface_class(SlotObject):
10119                 __slots__ = ("dblinkEbuildPhase", "dblinkDisplayMerge",
10120                         "dblinkElog", "dblinkEmergeLog", "fetch", "register", "schedule",
10121                         "scheduleSetup", "scheduleUnpack", "scheduleYield",
10122                         "unregister")
10123
10124         class _fetch_iface_class(SlotObject):
10125                 __slots__ = ("log_file", "schedule")
10126
10127         _task_queues_class = slot_dict_class(
10128                 ("merge", "jobs", "fetch", "unpack"), prefix="")
10129
10130         class _build_opts_class(SlotObject):
10131                 __slots__ = ("buildpkg", "buildpkgonly",
10132                         "fetch_all_uri", "fetchonly", "pretend")
10133
10134         class _binpkg_opts_class(SlotObject):
10135                 __slots__ = ("fetchonly", "getbinpkg", "pretend")
10136
10137         class _pkg_count_class(SlotObject):
10138                 __slots__ = ("curval", "maxval")
10139
10140         class _emerge_log_class(SlotObject):
10141                 __slots__ = ("xterm_titles",)
10142
10143                 def log(self, *pargs, **kwargs):
10144                         if not self.xterm_titles:
10145                                 # Avoid interference with the scheduler's status display.
10146                                 kwargs.pop("short_msg", None)
10147                         emergelog(self.xterm_titles, *pargs, **kwargs)
10148
10149         class _failed_pkg(SlotObject):
10150                 __slots__ = ("build_dir", "build_log", "pkg", "returncode")
10151
10152         class _ConfigPool(object):
10153                 """Interface for a task to temporarily allocate a config
10154                 instance from a pool. This allows a task to be constructed
10155                 long before the config instance actually becomes needed, like
10156                 when prefetchers are constructed for the whole merge list."""
10157                 __slots__ = ("_root", "_allocate", "_deallocate")
10158                 def __init__(self, root, allocate, deallocate):
10159                         self._root = root
10160                         self._allocate = allocate
10161                         self._deallocate = deallocate
10162                 def allocate(self):
10163                         return self._allocate(self._root)
10164                 def deallocate(self, settings):
10165                         self._deallocate(settings)
10166
10167         class _unknown_internal_error(portage.exception.PortageException):
10168                 """
10169                 Used internally to terminate scheduling. The specific reason for
10170                 the failure should have been dumped to stderr.
10171                 """
10172                 def __init__(self, value=""):
10173                         portage.exception.PortageException.__init__(self, value)
10174
10175         def __init__(self, settings, trees, mtimedb, myopts,
10176                 spinner, mergelist, favorites, digraph):
10177                 PollScheduler.__init__(self)
10178                 self.settings = settings
10179                 self.target_root = settings["ROOT"]
10180                 self.trees = trees
10181                 self.myopts = myopts
10182                 self._spinner = spinner
10183                 self._mtimedb = mtimedb
10184                 self._mergelist = mergelist
10185                 self._favorites = favorites
10186                 self._args_set = InternalPackageSet(favorites)
10187                 self._build_opts = self._build_opts_class()
10188                 for k in self._build_opts.__slots__:
10189                         setattr(self._build_opts, k, "--" + k.replace("_", "-") in myopts)
10190                 self._binpkg_opts = self._binpkg_opts_class()
10191                 for k in self._binpkg_opts.__slots__:
10192                         setattr(self._binpkg_opts, k, "--" + k.replace("_", "-") in myopts)
10193
10194                 self.curval = 0
10195                 self._logger = self._emerge_log_class()
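                      # One SequentialTaskQueue per queue name ("merge", "jobs",
                      # "fetch", "unpack"), so that access to shared resources such as
                      # the live filesystem, the fetch log and $DISTDIR can be
                      # serialized independently (see the _schedule_* methods below).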
10196                 self._task_queues = self._task_queues_class()
10197                 for k in self._task_queues.allowed_keys:
10198                         setattr(self._task_queues, k,
10199                                 SequentialTaskQueue())
10200
10201                 # Holds merges that wait to be executed until no builds are
10202                 # running. This is useful for system packages since dependencies
10203                 # on system packages are frequently unspecified.
10204                 self._merge_wait_queue = []
10205                 # Holds merges that have been transferred from the merge_wait_queue to
10206                 # the actual merge queue. They are removed from this list upon
10207                 # completion. Other packages can start building only when this list is
10208                 # empty.
10209                 self._merge_wait_scheduled = []
10210
10211                 # Holds system packages and their deep runtime dependencies. Before
10212                 # being merged, these packages go to merge_wait_queue, to be merged
10213                 # when no other packages are building.
10214                 self._deep_system_deps = set()
10215
10216                 # Holds packages to merge which will satisfy currently unsatisfied
10217                 # deep runtime dependencies of system packages. If this is not empty
10218                 # then no parallel builds will be spawned until it is empty. This
10219                 # minimizes the possibility that a build will fail due to the system
10220                 # being in a fragile state. For example, see bug #259954.
10221                 self._unsatisfied_system_deps = set()
10222
10223                 self._status_display = JobStatusDisplay()
10224                 self._max_load = myopts.get("--load-average")
10225                 max_jobs = myopts.get("--jobs")
10226                 if max_jobs is None:
10227                         max_jobs = 1
10228                 self._set_max_jobs(max_jobs)
10229
10230                 # The root where the currently running
10231                 # portage instance is installed.
10232                 self._running_root = trees["/"]["root_config"]
10233                 self.edebug = 0
10234                 if settings.get("PORTAGE_DEBUG", "") == "1":
10235                         self.edebug = 1
10236                 self.pkgsettings = {}
10237                 self._config_pool = {}
10238                 self._blocker_db = {}
10239                 for root in trees:
10240                         self._config_pool[root] = []
10241                         self._blocker_db[root] = BlockerDB(trees[root]["root_config"])
10242
10243                 fetch_iface = self._fetch_iface_class(log_file=self._fetch_log,
10244                         schedule=self._schedule_fetch)
10245                 self._sched_iface = self._iface_class(
10246                         dblinkEbuildPhase=self._dblink_ebuild_phase,
10247                         dblinkDisplayMerge=self._dblink_display_merge,
10248                         dblinkElog=self._dblink_elog,
10249                         dblinkEmergeLog=self._dblink_emerge_log,
10250                         fetch=fetch_iface, register=self._register,
10251                         schedule=self._schedule_wait,
10252                         scheduleSetup=self._schedule_setup,
10253                         scheduleUnpack=self._schedule_unpack,
10254                         scheduleYield=self._schedule_yield,
10255                         unregister=self._unregister)
10256
10257                 self._prefetchers = weakref.WeakValueDictionary()
10258                 self._pkg_queue = []
10259                 self._completed_tasks = set()
10260
10261                 self._failed_pkgs = []
10262                 self._failed_pkgs_all = []
10263                 self._failed_pkgs_die_msgs = []
10264                 self._post_mod_echo_msgs = []
10265                 self._parallel_fetch = False
10266                 merge_count = len([x for x in mergelist \
10267                         if isinstance(x, Package) and x.operation == "merge"])
10268                 self._pkg_count = self._pkg_count_class(
10269                         curval=0, maxval=merge_count)
10270                 self._status_display.maxval = self._pkg_count.maxval
10271
10272                 # The load average takes some time to respond when new
10273                 # jobs are added, so we need to limit the rate of adding
10274                 # new jobs.
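                      # When --load-average is in effect, _job_delay() spaces out
                      # consecutive job starts by min(10, 1.0 * jobs ** 1.5) seconds
                      # with the values below, e.g. about 2.8 seconds with 2 running
                      # jobs and 8 seconds with 4.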
10275                 self._job_delay_max = 10
10276                 self._job_delay_factor = 1.0
10277                 self._job_delay_exp = 1.5
10278                 self._previous_job_start_time = None
10279
10280                 self._set_digraph(digraph)
10281
10282                 # This is used to memoize the _choose_pkg() result when
10283                 # no packages can be chosen until one of the existing
10284                 # jobs completes.
10285                 self._choose_pkg_return_early = False
10286
10287                 features = self.settings.features
10288                 if "parallel-fetch" in features and \
10289                         not ("--pretend" in self.myopts or \
10290                         "--fetch-all-uri" in self.myopts or \
10291                         "--fetchonly" in self.myopts):
10292                         if "distlocks" not in features:
10293                                 portage.writemsg(red("!!!")+"\n", noiselevel=-1)
10294                                 portage.writemsg(red("!!!")+" parallel-fetching " + \
10295                                         "requires the distlocks feature to be enabled"+"\n",
10296                                         noiselevel=-1)
10297                                 portage.writemsg(red("!!!")+" You have it disabled, " + \
10298                                         "so parallel-fetching is being disabled"+"\n",
10299                                         noiselevel=-1)
10300                                 portage.writemsg(red("!!!")+"\n", noiselevel=-1)
10301                         elif len(mergelist) > 1:
10302                                 self._parallel_fetch = True
10303
10304                 if self._parallel_fetch:
10305                         # clear out existing fetch log if it exists
10306                         try:
10307                                 open(self._fetch_log, 'w').close()
10308                         except EnvironmentError:
10309                                 pass
10310
10311                 self._running_portage = None
10312                 portage_match = self._running_root.trees["vartree"].dbapi.match(
10313                         portage.const.PORTAGE_PACKAGE_ATOM)
10314                 if portage_match:
10315                         cpv = portage_match.pop()
10316                         self._running_portage = self._pkg(cpv, "installed",
10317                                 self._running_root, installed=True)
10318
10319         def _poll(self, timeout=None):
10320                 self._schedule()
10321                 PollScheduler._poll(self, timeout=timeout)
10322
10323         def _set_max_jobs(self, max_jobs):
10324                 self._max_jobs = max_jobs
10325                 self._task_queues.jobs.max_jobs = max_jobs
10326
10327         def _background_mode(self):
10328                 """
10329                 Check if background mode is enabled and adjust states as necessary.
10330
10331                 @rtype: bool
10332                 @returns: True if background mode is enabled, False otherwise.
10333                 """
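                      # Background mode requires more than one concurrent job (or
                      # --quiet) and is vetoed by any option listed in
                      # self._opts_no_background.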
10334                 background = (self._max_jobs is True or \
10335                         self._max_jobs > 1 or "--quiet" in self.myopts) and \
10336                         not bool(self._opts_no_background.intersection(self.myopts))
10337
10338                 if background:
10339                         interactive_tasks = self._get_interactive_tasks()
10340                         if interactive_tasks:
10341                                 background = False
10342                                 writemsg_level(">>> Sending package output to stdio due " + \
10343                                         "to interactive package(s):\n",
10344                                         level=logging.INFO, noiselevel=-1)
10345                                 msg = [""]
10346                                 for pkg in interactive_tasks:
10347                                         pkg_str = "  " + colorize("INFORM", str(pkg.cpv))
10348                                         if pkg.root != "/":
10349                                                 pkg_str += " for " + pkg.root
10350                                         msg.append(pkg_str)
10351                                 msg.append("")
10352                                 writemsg_level("".join("%s\n" % (l,) for l in msg),
10353                                         level=logging.INFO, noiselevel=-1)
10354                                 if self._max_jobs is True or self._max_jobs > 1:
10355                                         self._set_max_jobs(1)
10356                                         writemsg_level(">>> Setting --jobs=1 due " + \
10357                                                 "to the above interactive package(s)\n",
10358                                                 level=logging.INFO, noiselevel=-1)
10359
10360                 self._status_display.quiet = \
10361                         not background or \
10362                         ("--quiet" in self.myopts and \
10363                         "--verbose" not in self.myopts)
10364
10365                 self._logger.xterm_titles = \
10366                         "notitles" not in self.settings.features and \
10367                         self._status_display.quiet
10368
10369                 return background
10370
10371         def _get_interactive_tasks(self):
10372                 from portage import flatten
10373                 from portage.dep import use_reduce, paren_reduce
10374                 interactive_tasks = []
10375                 for task in self._mergelist:
10376                         if not (isinstance(task, Package) and \
10377                                 task.operation == "merge"):
10378                                 continue
10379                         try:
10380                                 properties = flatten(use_reduce(paren_reduce(
10381                                         task.metadata["PROPERTIES"]), uselist=task.use.enabled))
10382                         except portage.exception.InvalidDependString, e:
10383                                 show_invalid_depstring_notice(task,
10384                                         task.metadata["PROPERTIES"], str(e))
10385                                 raise self._unknown_internal_error()
10386                         if "interactive" in properties:
10387                                 interactive_tasks.append(task)
10388                 return interactive_tasks
10389
10390         def _set_digraph(self, digraph):
10391                 if "--nodeps" in self.myopts or \
10392                         (self._max_jobs is not True and self._max_jobs < 2):
10393                         # save some memory
10394                         self._digraph = None
10395                         return
10396
10397                 self._digraph = digraph
10398                 self._find_system_deps()
10399                 self._prune_digraph()
10400                 self._prevent_builddir_collisions()
10401
10402         def _find_system_deps(self):
10403                 """
10404                 Find system packages and their deep runtime dependencies. Before being
10405                 merged, these packages go to merge_wait_queue, to be merged when no
10406                 other packages are building.
10407                 """
10408                 deep_system_deps = self._deep_system_deps
10409                 deep_system_deps.clear()
10410                 deep_system_deps.update(
10411                         _find_deep_system_runtime_deps(self._digraph))
10412                 deep_system_deps.difference_update([pkg for pkg in \
10413                         deep_system_deps if pkg.operation != "merge"])
10414
10415         def _prune_digraph(self):
10416                 """
10417                 Prune any root nodes that are irrelevant.
10418                 """
10419
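                      # Repeatedly strip root nodes that no longer matter (non-Package
                      # nodes, installed nomerge nodes, onlydeps nodes, and completed
                      # tasks) until a fixed point is reached.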
10420                 graph = self._digraph
10421                 completed_tasks = self._completed_tasks
10422                 removed_nodes = set()
10423                 while True:
10424                         for node in graph.root_nodes():
10425                                 if not isinstance(node, Package) or \
10426                                         (node.installed and node.operation == "nomerge") or \
10427                                         node.onlydeps or \
10428                                         node in completed_tasks:
10429                                         removed_nodes.add(node)
10430                         if removed_nodes:
10431                                 graph.difference_update(removed_nodes)
10432                         if not removed_nodes:
10433                                 break
10434                         removed_nodes.clear()
10435
10436         def _prevent_builddir_collisions(self):
10437                 """
10438                 When building stages, sometimes the exact same cpv needs to be merged
10439                 to both $ROOTs. Add edges to the digraph in order to avoid collisions
10440                 in the builddir. Currently, normal file locks would be inappropriate
10441                 for this purpose since emerge holds all of its build dir locks from
10442                 the main process.
10443                 """
10444                 cpv_map = {}
10445                 for pkg in self._mergelist:
10446                         if not isinstance(pkg, Package):
10447                                 # a satisfied blocker
10448                                 continue
10449                         if pkg.installed:
10450                                 continue
10451                         if pkg.cpv not in cpv_map:
10452                                 cpv_map[pkg.cpv] = [pkg]
10453                                 continue
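                              # Another instance of the same cpv (destined for a different
                              # $ROOT): add buildtime edges between the instances so they
                              # cannot occupy the same builddir at the same time.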
10454                         for earlier_pkg in cpv_map[pkg.cpv]:
10455                                 self._digraph.add(earlier_pkg, pkg,
10456                                         priority=DepPriority(buildtime=True))
10457                         cpv_map[pkg.cpv].append(pkg)
10458
10459         class _pkg_failure(portage.exception.PortageException):
10460                 """
10461                 An instance of this class is raised by unmerge() when
10462                 an uninstallation fails.
10463                 """
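                      # The first positional argument, if any, becomes the exit
                      # status; otherwise status defaults to 1.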
10464                 status = 1
10465                 def __init__(self, *pargs):
10466                         portage.exception.PortageException.__init__(self, pargs)
10467                         if pargs:
10468                                 self.status = pargs[0]
10469
10470         def _schedule_fetch(self, fetcher):
10471                 """
10472                 Schedule a fetcher on the fetch queue, in order to
10473                 serialize access to the fetch log.
10474                 """
10475                 self._task_queues.fetch.addFront(fetcher)
10476
10477         def _schedule_setup(self, setup_phase):
10478                 """
10479                 Schedule a setup phase on the merge queue, in order to
10480                 serialize unsandboxed access to the live filesystem.
10481                 """
10482                 self._task_queues.merge.addFront(setup_phase)
10483                 self._schedule()
10484
10485         def _schedule_unpack(self, unpack_phase):
10486                 """
10487                 Schedule an unpack phase on the unpack queue, in order
10488                 to serialize $DISTDIR access for live ebuilds.
10489                 """
10490                 self._task_queues.unpack.add(unpack_phase)
10491
10492         def _find_blockers(self, new_pkg):
10493                 """
10494                 Returns a callable which should be called only when
10495                 the vdb lock has been acquired.
10496                 """
10497                 def get_blockers():
10498                         return self._find_blockers_with_lock(new_pkg, acquire_lock=0)
10499                 return get_blockers
10500
10501         def _find_blockers_with_lock(self, new_pkg, acquire_lock=0):
10502                 if self._opts_ignore_blockers.intersection(self.myopts):
10503                         return None
10504
10505                 # Call gc.collect() here to avoid heap overflow that
10506                 # triggers 'Cannot allocate memory' errors (reported
10507                 # with python-2.5).
10508                 import gc
10509                 gc.collect()
10510
10511                 blocker_db = self._blocker_db[new_pkg.root]
10512
10513                 blocker_dblinks = []
10514                 for blocking_pkg in blocker_db.findInstalledBlockers(
10515                         new_pkg, acquire_lock=acquire_lock):
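                              # Skip installed packages that share the new package's slot
                              # or cpv; those are handled by the normal replacement
                              # process rather than unmerged as blockers.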
10516                         if new_pkg.slot_atom == blocking_pkg.slot_atom:
10517                                 continue
10518                         if new_pkg.cpv == blocking_pkg.cpv:
10519                                 continue
10520                         blocker_dblinks.append(portage.dblink(
10521                                 blocking_pkg.category, blocking_pkg.pf, blocking_pkg.root,
10522                                 self.pkgsettings[blocking_pkg.root], treetype="vartree",
10523                                 vartree=self.trees[blocking_pkg.root]["vartree"]))
10524
10525                 gc.collect()
10526
10527                 return blocker_dblinks
10528
10529         def _dblink_pkg(self, pkg_dblink):
10530                 cpv = pkg_dblink.mycpv
10531                 type_name = RootConfig.tree_pkg_map[pkg_dblink.treetype]
10532                 root_config = self.trees[pkg_dblink.myroot]["root_config"]
10533                 installed = type_name == "installed"
10534                 return self._pkg(cpv, type_name, root_config, installed=installed)
10535
10536         def _append_to_log_path(self, log_path, msg):
10537                 f = open(log_path, 'a')
10538                 try:
10539                         f.write(msg)
10540                 finally:
10541                         f.close()
10542
10543         def _dblink_elog(self, pkg_dblink, phase, func, msgs):
10544
10545                 log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
10546                 log_file = None
10547                 out = sys.stdout
10548                 background = self._background
10549
10550                 if background and log_path is not None:
10551                         log_file = open(log_path, 'a')
10552                         out = log_file
10553
10554                 try:
10555                         for msg in msgs:
10556                                 func(msg, phase=phase, key=pkg_dblink.mycpv, out=out)
10557                 finally:
10558                         if log_file is not None:
10559                                 log_file.close()
10560
10561         def _dblink_emerge_log(self, msg):
10562                 self._logger.log(msg)
10563
10564         def _dblink_display_merge(self, pkg_dblink, msg, level=0, noiselevel=0):
10565                 log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
10566                 background = self._background
10567
10568                 if log_path is None:
10569                         if not (background and level < logging.WARN):
10570                                 portage.util.writemsg_level(msg,
10571                                         level=level, noiselevel=noiselevel)
10572                 else:
10573                         if not background:
10574                                 portage.util.writemsg_level(msg,
10575                                         level=level, noiselevel=noiselevel)
10576                         self._append_to_log_path(log_path, msg)
10577
10578         def _dblink_ebuild_phase(self,
10579                 pkg_dblink, pkg_dbapi, ebuild_path, phase):
10580                 """
10581                 Using this callback for merge phases allows the scheduler
10582                 to run while these phases execute asynchronously, and allows
10583                 the scheduler to control output handling.
10584                 """
10585
10586                 scheduler = self._sched_iface
10587                 settings = pkg_dblink.settings
10588                 pkg = self._dblink_pkg(pkg_dblink)
10589                 background = self._background
10590                 log_path = settings.get("PORTAGE_LOG_FILE")
10591
10592                 ebuild_phase = EbuildPhase(background=background,
10593                         pkg=pkg, phase=phase, scheduler=scheduler,
10594                         settings=settings, tree=pkg_dblink.treetype)
10595                 ebuild_phase.start()
10596                 ebuild_phase.wait()
10597
10598                 return ebuild_phase.returncode
10599
10600         def _generate_digests(self):
10601                 """
10602                 Generate digests if necessary for --digest or FEATURES=digest.
10603                 In order to avoid interference, this must be done before parallel
10604                 tasks are started.
10605                 """
10606
10607                 if '--fetchonly' in self.myopts:
10608                         return os.EX_OK
10609
10610                 digest = '--digest' in self.myopts
10611                 if not digest:
10612                         for pkgsettings in self.pkgsettings.itervalues():
10613                                 if 'digest' in pkgsettings.features:
10614                                         digest = True
10615                                         break
10616
10617                 if not digest:
10618                         return os.EX_OK
10619
10620                 for x in self._mergelist:
10621                         if not isinstance(x, Package) or \
10622                                 x.type_name != 'ebuild' or \
10623                                 x.operation != 'merge':
10624                                 continue
10625                         pkgsettings = self.pkgsettings[x.root]
10626                         if '--digest' not in self.myopts and \
10627                                 'digest' not in pkgsettings.features:
10628                                 continue
10629                         portdb = x.root_config.trees['porttree'].dbapi
10630                         ebuild_path = portdb.findname(x.cpv)
10631                         if not ebuild_path:
10632                                 writemsg_level(
10633                                         "!!! Could not locate ebuild for '%s'.\n" \
10634                                         % x.cpv, level=logging.ERROR, noiselevel=-1)
10635                                 return 1
10636                         pkgsettings['O'] = os.path.dirname(ebuild_path)
10637                         if not portage.digestgen([], pkgsettings, myportdb=portdb):
10638                                 writemsg_level(
10639                                         "!!! Unable to generate manifest for '%s'.\n" \
10640                                         % x.cpv, level=logging.ERROR, noiselevel=-1)
10641                                 return 1
10642
10643                 return os.EX_OK
10644
10645         def _check_manifests(self):
10646                 # Verify all the manifests now so that the user is notified of failure
10647                 # as soon as possible.
10648                 if "strict" not in self.settings.features or \
10649                         "--fetchonly" in self.myopts or \
10650                         "--fetch-all-uri" in self.myopts:
10651                         return os.EX_OK
10652
10653                 shown_verifying_msg = False
10654                 quiet_settings = {}
10655                 for myroot, pkgsettings in self.pkgsettings.iteritems():
10656                         quiet_config = portage.config(clone=pkgsettings)
10657                         quiet_config["PORTAGE_QUIET"] = "1"
10658                         quiet_config.backup_changes("PORTAGE_QUIET")
10659                         quiet_settings[myroot] = quiet_config
10660                         del quiet_config
10661
10662                 for x in self._mergelist:
10663                         if not isinstance(x, Package) or \
10664                                 x.type_name != "ebuild":
10665                                 continue
10666
10667                         if not shown_verifying_msg:
10668                                 shown_verifying_msg = True
10669                                 self._status_msg("Verifying ebuild manifests")
10670
10671                         root_config = x.root_config
10672                         portdb = root_config.trees["porttree"].dbapi
10673                         quiet_config = quiet_settings[root_config.root]
10674                         quiet_config["O"] = os.path.dirname(portdb.findname(x.cpv))
10675                         if not portage.digestcheck([], quiet_config, strict=True):
10676                                 return 1
10677
10678                 return os.EX_OK
10679
10680         def _add_prefetchers(self):
10681
10682                 if not self._parallel_fetch:
10683                         return
10684
10685                 self._status_msg("Starting parallel fetch")
10686
10687                 prefetchers = self._prefetchers
10688
10689                 # In order to avoid "waiting for lock" messages
10690                 # at the beginning, which annoy users, never
10691                 # spawn a prefetcher for the first package.
10692                 for pkg in self._mergelist[1:]:
10693                         prefetcher = self._create_prefetcher(pkg)
10694                         if prefetcher is not None:
10695                                 self._task_queues.fetch.add(prefetcher)
10696                                 prefetchers[pkg] = prefetcher
10699
10700         def _create_prefetcher(self, pkg):
10701                 """
10702                 @return: a prefetcher, or None if not applicable
10703                 """
10704                 prefetcher = None
10705
10706                 if not isinstance(pkg, Package):
10707                         pass
10708
10709                 elif pkg.type_name == "ebuild":
10710
10711                         prefetcher = EbuildFetcher(background=True,
10712                                 config_pool=self._ConfigPool(pkg.root,
10713                                 self._allocate_config, self._deallocate_config),
10714                                 fetchonly=1, logfile=self._fetch_log,
10715                                 pkg=pkg, prefetch=True, scheduler=self._sched_iface)
10716
10717                 elif pkg.type_name == "binary" and \
10718                         "--getbinpkg" in self.myopts and \
10719                         pkg.root_config.trees["bintree"].isremote(pkg.cpv):
10720
10721                         prefetcher = BinpkgPrefetcher(background=True,
10722                                 pkg=pkg, scheduler=self._sched_iface)
10723
10724                 return prefetcher
10725
10726         def _is_restart_scheduled(self):
10727                 """
10728                 Check if the merge list contains a replacement
10729                 for the currently running instance that will result
10730                 in a restart after the merge.
10731                 @rtype: bool
10732                 @returns: True if a restart is scheduled, False otherwise.
10733                 """
10734                 if self._opts_no_restart.intersection(self.myopts):
10735                         return False
10736
10737                 mergelist = self._mergelist
10738
10739                 for i, pkg in enumerate(mergelist):
10740                         if self._is_restart_necessary(pkg) and \
10741                                 i != len(mergelist) - 1:
10742                                 return True
10743
10744                 return False
10745
10746         def _is_restart_necessary(self, pkg):
10747                 """
10748                 @return: True if merging the given package
10749                         requires a restart, False otherwise.
10750                 """
10751
10752                 # Figure out if we need a restart.
10753                 if pkg.root == self._running_root.root and \
10754                         portage.match_from_list(
10755                         portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
10756                         if self._running_portage:
10757                                 return pkg.cpv != self._running_portage.cpv
10758                         return True
10759                 return False
10760
10761         def _restart_if_necessary(self, pkg):
10762                 """
10763                 Use execv() to restart emerge. This happens
10764                 if portage upgrades itself and there are
10765                 remaining packages in the list.
10766                 """
10767
10768                 if self._opts_no_restart.intersection(self.myopts):
10769                         return
10770
10771                 if not self._is_restart_necessary(pkg):
10772                         return
10773
10774                 if pkg == self._mergelist[-1]:
10775                         return
10776
10777                 self._main_loop_cleanup()
10778
10779                 logger = self._logger
10780                 pkg_count = self._pkg_count
10781                 mtimedb = self._mtimedb
10782                 bad_resume_opts = self._bad_resume_opts
10783
10784                 logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
10785                         (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
10786
10787                 logger.log(" *** RESTARTING " + \
10788                         "emerge via exec() after change of " + \
10789                         "portage version.")
10790
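                      # Drop the just-merged package from the resume mergelist and
                      # commit, so that the re-exec'd "emerge --resume" continues
                      # with the remaining packages.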
10791                 mtimedb["resume"]["mergelist"].remove(list(pkg))
10792                 mtimedb.commit()
10793                 portage.run_exitfuncs()
10794                 mynewargv = [sys.argv[0], "--resume"]
10795                 resume_opts = self.myopts.copy()
10796                 # For automatic resume, we need to prevent
10797                 # any of bad_resume_opts from leaking in
10798                 # via EMERGE_DEFAULT_OPTS.
10799                 resume_opts["--ignore-default-opts"] = True
10800                 for myopt, myarg in resume_opts.iteritems():
10801                         if myopt not in bad_resume_opts:
10802                                 if myarg is True:
10803                                         mynewargv.append(myopt)
10804                                 else:
10805                                         mynewargv.append(myopt +"="+ str(myarg))
10806                 # priority only needs to be adjusted on the first run
10807                 os.environ["PORTAGE_NICENESS"] = "0"
10808                 os.execv(mynewargv[0], mynewargv)
10809
10810         def merge(self):
10811
10812                 if "--resume" in self.myopts:
10813                         # We're resuming.
10814                         portage.writemsg_stdout(
10815                                 colorize("GOOD", "*** Resuming merge...\n"), noiselevel=-1)
10816                         self._logger.log(" *** Resuming merge...")
10817
10818                 self._save_resume_list()
10819
10820                 try:
10821                         self._background = self._background_mode()
10822                 except self._unknown_internal_error:
10823                         return 1
10824
10825                 for root in self.trees:
10826                         root_config = self.trees[root]["root_config"]
10827
10828                         # Even for --pretend --fetch mode, PORTAGE_TMPDIR is required
10829                         # since it might spawn pkg_nofetch which requires PORTAGE_BUILDDIR
10830                         # for ensuring sane $PWD (bug #239560) and storing elog messages.
10831                         tmpdir = root_config.settings.get("PORTAGE_TMPDIR", "")
10832                         if not tmpdir or not os.path.isdir(tmpdir):
10833                                 msg = "The directory specified in your " + \
10834                                         "PORTAGE_TMPDIR variable, '%s', " % tmpdir + \
10835                                         "does not exist. Please create this " + \
10836                                         "directory or correct your PORTAGE_TMPDIR setting."
10837                                 msg = textwrap.wrap(msg, 70)
10838                                 out = portage.output.EOutput()
10839                                 for l in msg:
10840                                         out.eerror(l)
10841                                 return 1
10842
10843                         if self._background:
10844                                 root_config.settings.unlock()
10845                                 root_config.settings["PORTAGE_BACKGROUND"] = "1"
10846                                 root_config.settings.backup_changes("PORTAGE_BACKGROUND")
10847                                 root_config.settings.lock()
10848
10849                         self.pkgsettings[root] = portage.config(
10850                                 clone=root_config.settings)
10851
10852                 rval = self._generate_digests()
10853                 if rval != os.EX_OK:
10854                         return rval
10855
10856                 rval = self._check_manifests()
10857                 if rval != os.EX_OK:
10858                         return rval
10859
10860                 keep_going = "--keep-going" in self.myopts
10861                 fetchonly = self._build_opts.fetchonly
10862                 mtimedb = self._mtimedb
10863                 failed_pkgs = self._failed_pkgs
10864
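                      # With --keep-going, each iteration drops the failed packages
                      # from the resume mergelist, recalculates the remaining list via
                      # _calc_resume_list(), and tries again until the list is
                      # exhausted or an unrecoverable condition is hit.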
10865                 while True:
10866                         rval = self._merge()
10867                         if rval == os.EX_OK or fetchonly or not keep_going:
10868                                 break
10869                         if "resume" not in mtimedb:
10870                                 break
10871                         mergelist = self._mtimedb["resume"].get("mergelist")
10872                         if not mergelist:
10873                                 break
10874
10875                         if not failed_pkgs:
10876                                 break
10877
10878                         for failed_pkg in failed_pkgs:
10879                                 mergelist.remove(list(failed_pkg.pkg))
10880
10881                         self._failed_pkgs_all.extend(failed_pkgs)
10882                         del failed_pkgs[:]
10883
10884                         if not mergelist:
10885                                 break
10886
10887                         if not self._calc_resume_list():
10888                                 break
10889
10890                         clear_caches(self.trees)
10891                         if not self._mergelist:
10892                                 break
10893
10894                         self._save_resume_list()
10895                         self._pkg_count.curval = 0
10896                         self._pkg_count.maxval = len([x for x in self._mergelist \
10897                                 if isinstance(x, Package) and x.operation == "merge"])
10898                         self._status_display.maxval = self._pkg_count.maxval
10899
10900                 self._logger.log(" *** Finished. Cleaning up...")
10901
10902                 if failed_pkgs:
10903                         self._failed_pkgs_all.extend(failed_pkgs)
10904                         del failed_pkgs[:]
10905
10906                 background = self._background
10907                 failure_log_shown = False
10908                 if background and len(self._failed_pkgs_all) == 1:
10909                         # If only one package failed then just show its
10910                         # whole log for easy viewing.
10911                         failed_pkg = self._failed_pkgs_all[-1]
10913                         log_file = None
10916
10917                         log_path = self._locate_failure_log(failed_pkg)
10918                         if log_path is not None:
10919                                 try:
10920                                         log_file = open(log_path)
10921                                 except IOError:
10922                                         pass
10923
10924                         if log_file is not None:
10925                                 try:
10926                                         for line in log_file:
10927                                                 writemsg_level(line, noiselevel=-1)
10928                                 finally:
10929                                         log_file.close()
10930                                 failure_log_shown = True
10931
10932                 # Dump mod_echo output now since it tends to flood the terminal.
10933                 # This prevents more important output, generated later, from
10934                 # being swept away by the mod_echo output.
10935                 mod_echo_output = _flush_elog_mod_echo()
10936
10937                 if background and not failure_log_shown and \
10938                         self._failed_pkgs_all and \
10939                         self._failed_pkgs_die_msgs and \
10940                         not mod_echo_output:
10941
10942                         printer = portage.output.EOutput()
10943                         for mysettings, key, logentries in self._failed_pkgs_die_msgs:
10944                                 root_msg = ""
10945                                 if mysettings["ROOT"] != "/":
10946                                         root_msg = " merged to %s" % mysettings["ROOT"]
10947                                 print
10948                                 printer.einfo("Error messages for package %s%s:" % \
10949                                         (colorize("INFORM", key), root_msg))
10950                                 print
10951                                 for phase in portage.const.EBUILD_PHASES:
10952                                         if phase not in logentries:
10953                                                 continue
10954                                         for msgtype, msgcontent in logentries[phase]:
10955                                                 if isinstance(msgcontent, basestring):
10956                                                         msgcontent = [msgcontent]
10957                                                 for line in msgcontent:
10958                                                         printer.eerror(line.strip("\n"))
10959
10960                 if self._post_mod_echo_msgs:
10961                         for msg in self._post_mod_echo_msgs:
10962                                 msg()
10963
10964                 if len(self._failed_pkgs_all) > 1 or \
10965                         (self._failed_pkgs_all and "--keep-going" in self.myopts):
10966                         if len(self._failed_pkgs_all) > 1:
10967                                 msg = "The following %d packages have " % \
10968                                         len(self._failed_pkgs_all) + \
10969                                         "failed to build or install:"
10970                         else:
10971                                 msg = "The following package has " + \
10972                                         "failed to build or install:"
10973                         prefix = bad(" * ")
10974                         writemsg(prefix + "\n", noiselevel=-1)
10975                         from textwrap import wrap
10976                         for line in wrap(msg, 72):
10977                                 writemsg("%s%s\n" % (prefix, line), noiselevel=-1)
10978                         writemsg(prefix + "\n", noiselevel=-1)
10979                         for failed_pkg in self._failed_pkgs_all:
10980                                 writemsg("%s\t%s\n" % (prefix,
10981                                         colorize("INFORM", str(failed_pkg.pkg))),
10982                                         noiselevel=-1)
10983                         writemsg(prefix + "\n", noiselevel=-1)
10984
10985                 return rval
10986
10987         def _elog_listener(self, mysettings, key, logentries, fulltext):
10988                 errors = portage.elog.filter_loglevels(logentries, ["ERROR"])
10989                 if errors:
10990                         self._failed_pkgs_die_msgs.append(
10991                                 (mysettings, key, errors))
10992
10993         def _locate_failure_log(self, failed_pkg):
10994
10995                 build_dir = failed_pkg.build_dir
10996                 log_file = None
10997
10998                 log_paths = [failed_pkg.build_log]
10999
11000                 for log_path in log_paths:
11001                         if not log_path:
11002                                 continue
11003
11004                         try:
11005                                 log_size = os.stat(log_path).st_size
11006                         except OSError:
11007                                 continue
11008
11009                         if log_size == 0:
11010                                 continue
11011
11012                         return log_path
11013
11014                 return None
11015
11016         def _add_packages(self):
11017                 pkg_queue = self._pkg_queue
11018                 for pkg in self._mergelist:
11019                         if isinstance(pkg, Package):
11020                                 pkg_queue.append(pkg)
11021                         elif isinstance(pkg, Blocker):
11022                                 pass
11023
11024         def _system_merge_started(self, merge):
11025                 """
11026                 Add any unsatisfied runtime deps to self._unsatisfied_system_deps.
11027                 """
11028                 graph = self._digraph
11029                 if graph is None:
11030                         return
11031                 pkg = merge.merge.pkg
11032
11033                 # Skip this if $ROOT != / since it shouldn't matter if there
11034                 # are unsatisfied system runtime deps in this case.
11035                 if pkg.root != '/':
11036                         return
11037
11038                 completed_tasks = self._completed_tasks
11039                 unsatisfied = self._unsatisfied_system_deps
11040
11041                 def ignore_non_runtime_or_satisfied(priority):
11042                         """
11043                         Ignore non-runtime and satisfied runtime priorities.
11044                         """
11045                         if isinstance(priority, DepPriority) and \
11046                                 not priority.satisfied and \
11047                                 (priority.runtime or priority.runtime_post):
11048                                 return False
11049                         return True
11050
11051                 # When checking for unsatisfied runtime deps, only check
11052                 # direct deps since indirect deps are checked when the
11053                 # corresponding parent is merged.
11054                 for child in graph.child_nodes(pkg,
11055                         ignore_priority=ignore_non_runtime_or_satisfied):
11056                         if not isinstance(child, Package) or \
11057                                 child.operation == 'uninstall':
11058                                 continue
11059                         if child is pkg:
11060                                 continue
11061                         if child.operation == 'merge' and \
11062                                 child not in completed_tasks:
11063                                 unsatisfied.add(child)
11064
11065         def _merge_wait_exit_handler(self, task):
11066                 self._merge_wait_scheduled.remove(task)
11067                 self._merge_exit(task)
11068
11069         def _merge_exit(self, merge):
11070                 self._do_merge_exit(merge)
11071                 self._deallocate_config(merge.merge.settings)
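                      # Only successful merges of new packages count toward the
                      # displayed total; uninstall tasks (installed packages) are
                      # not counted.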
11072                 if merge.returncode == os.EX_OK and \
11073                         not merge.merge.pkg.installed:
11074                         self._status_display.curval += 1
11075                 self._status_display.merges = len(self._task_queues.merge)
11076                 self._schedule()
11077
11078         def _do_merge_exit(self, merge):
11079                 pkg = merge.merge.pkg
11080                 if merge.returncode != os.EX_OK:
11081                         settings = merge.merge.settings
11082                         build_dir = settings.get("PORTAGE_BUILDDIR")
11083                         build_log = settings.get("PORTAGE_LOG_FILE")
11084
11085                         self._failed_pkgs.append(self._failed_pkg(
11086                                 build_dir=build_dir, build_log=build_log,
11087                                 pkg=pkg,
11088                                 returncode=merge.returncode))
11089                         self._failed_pkg_msg(self._failed_pkgs[-1], "install", "to")
11090
11091                         self._status_display.failed = len(self._failed_pkgs)
11092                         return
11093
11094                 self._task_complete(pkg)
11095                 pkg_to_replace = merge.merge.pkg_to_replace
11096                 if pkg_to_replace is not None:
11097                         # When a package is replaced, mark its uninstall
11098                         # task complete (if any).
11099                         uninst_hash_key = \
11100                                 ("installed", pkg.root, pkg_to_replace.cpv, "uninstall")
11101                         self._task_complete(uninst_hash_key)
11102
11103                 if pkg.installed:
11104                         return
11105
11106                 self._restart_if_necessary(pkg)
11107
11108                 # Call mtimedb.commit() after each merge so that
11109                 # --resume still works after being interrupted
11110                 # by reboot, sigkill or similar.
11111                 mtimedb = self._mtimedb
11112                 mtimedb["resume"]["mergelist"].remove(list(pkg))
11113                 if not mtimedb["resume"]["mergelist"]:
11114                         del mtimedb["resume"]
11115                 mtimedb.commit()
11116
11117         def _build_exit(self, build):
11118                 if build.returncode == os.EX_OK:
11119                         self.curval += 1
11120                         merge = PackageMerge(merge=build)
11121                         if not build.build_opts.buildpkgonly and \
11122                                 build.pkg in self._deep_system_deps:
11123                                 # Since dependencies on system packages are frequently
11124                                 # unspecified, merge them only when no builds are executing.
11125                                 self._merge_wait_queue.append(merge)
11126                                 merge.addStartListener(self._system_merge_started)
11127                         else:
11128                                 merge.addExitListener(self._merge_exit)
11129                                 self._task_queues.merge.add(merge)
11130                                 self._status_display.merges = len(self._task_queues.merge)
11131                 else:
11132                         settings = build.settings
11133                         build_dir = settings.get("PORTAGE_BUILDDIR")
11134                         build_log = settings.get("PORTAGE_LOG_FILE")
11135
11136                         self._failed_pkgs.append(self._failed_pkg(
11137                                 build_dir=build_dir, build_log=build_log,
11138                                 pkg=build.pkg,
11139                                 returncode=build.returncode))
11140                         self._failed_pkg_msg(self._failed_pkgs[-1], "emerge", "for")
11141
11142                         self._status_display.failed = len(self._failed_pkgs)
11143                         self._deallocate_config(build.settings)
11144                 self._jobs -= 1
11145                 self._status_display.running = self._jobs
11146                 self._schedule()
11147
11148         def _extract_exit(self, build):
11149                 self._build_exit(build)
11150
11151         def _task_complete(self, pkg):
11152                 self._completed_tasks.add(pkg)
11153                 self._unsatisfied_system_deps.discard(pkg)
11154                 self._choose_pkg_return_early = False
11155
11156         def _merge(self):
11157
11158                 self._add_prefetchers()
11159                 self._add_packages()
11160                 pkg_queue = self._pkg_queue
11161                 failed_pkgs = self._failed_pkgs
11162                 portage.locks._quiet = self._background
11163                 portage.elog._emerge_elog_listener = self._elog_listener
11164                 rval = os.EX_OK
11165
11166                 try:
11167                         self._main_loop()
11168                 finally:
11169                         self._main_loop_cleanup()
11170                         portage.locks._quiet = False
11171                         portage.elog._emerge_elog_listener = None
11172                         if failed_pkgs:
11173                                 rval = failed_pkgs[-1].returncode
11174
11175                 return rval
11176
11177         def _main_loop_cleanup(self):
11178                 del self._pkg_queue[:]
11179                 self._completed_tasks.clear()
11180                 self._deep_system_deps.clear()
11181                 self._unsatisfied_system_deps.clear()
11182                 self._choose_pkg_return_early = False
11183                 self._status_display.reset()
11184                 self._digraph = None
11185                 self._task_queues.fetch.clear()
11186
11187         def _choose_pkg(self):
11188                 """
11189                 Choose a task that has all of its dependencies satisfied.
11190                 """
11191
11192                 if self._choose_pkg_return_early:
11193                         return None
11194
11195                 if self._digraph is None:
11196                         if (self._jobs or self._task_queues.merge) and \
11197                                 not ("--nodeps" in self.myopts and \
11198                                 (self._max_jobs is True or self._max_jobs > 1)):
11199                                 self._choose_pkg_return_early = True
11200                                 return None
11201                         return self._pkg_queue.pop(0)
11202
11203                 if not (self._jobs or self._task_queues.merge):
11204                         return self._pkg_queue.pop(0)
11205
11206                 self._prune_digraph()
11207
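                      # Pick the first package in the queue whose deep dependencies
                      # contain no scheduled merges. Packages that come later in the
                      # queue are ignored as dependencies since they would be merged
                      # after the chosen package anyway.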
11208                 chosen_pkg = None
11209                 later = set(self._pkg_queue)
11210                 for pkg in self._pkg_queue:
11211                         later.remove(pkg)
11212                         if not self._dependent_on_scheduled_merges(pkg, later):
11213                                 chosen_pkg = pkg
11214                                 break
11215
11216                 if chosen_pkg is not None:
11217                         self._pkg_queue.remove(chosen_pkg)
11218
11219                 if chosen_pkg is None:
11220                         # There's no point in searching for a package to
11221                         # choose until at least one of the existing jobs
11222                         # completes.
11223                         self._choose_pkg_return_early = True
11224
11225                 return chosen_pkg
11226
11227         def _dependent_on_scheduled_merges(self, pkg, later):
11228                 """
11229                 Traverse the subgraph of the given package's deep dependencies
11230                 to see if it contains any scheduled merges.
11231                 @param pkg: a package to check dependencies for
11232                 @type pkg: Package
11233                 @param later: packages for which dependence should be ignored
11234                         since they will be merged later than pkg anyway and therefore
11235                         delaying the merge of pkg will not result in a more optimal
11236                         merge order
11237                 @type later: set
11238                 @rtype: bool
11239                 @returns: True if the package is dependent, False otherwise.
11240                 """
11241
11242                 graph = self._digraph
11243                 completed_tasks = self._completed_tasks
11244
11245                 dependent = False
11246                 traversed_nodes = set([pkg])
11247                 direct_deps = graph.child_nodes(pkg)
11248                 node_stack = direct_deps
11249                 direct_deps = frozenset(direct_deps)
11250                 while node_stack:
11251                         node = node_stack.pop()
11252                         if node in traversed_nodes:
11253                                 continue
11254                         traversed_nodes.add(node)
11255                         if not ((node.installed and node.operation == "nomerge") or \
11256                                 (node.operation == "uninstall" and \
11257                                 node not in direct_deps) or \
11258                                 node in completed_tasks or \
11259                                 node in later):
11260                                 dependent = True
11261                                 break
11262                         node_stack.extend(graph.child_nodes(node))
11263
11264                 return dependent
11265
11266         def _allocate_config(self, root):
11267                 """
11268                 Allocate a unique config instance for a task in order
11269                 to prevent interference between parallel tasks.
11270                 """
11271                 if self._config_pool[root]:
11272                         temp_settings = self._config_pool[root].pop()
11273                 else:
11274                         temp_settings = portage.config(clone=self.pkgsettings[root])
11275                 # Since config.setcpv() isn't guaranteed to call config.reset() (for
11276                 # performance reasons), call it here to make sure all settings from the
11277                 # previous package get flushed out (such as PORTAGE_LOG_FILE).
11278                 temp_settings.reload()
11279                 temp_settings.reset()
11280                 return temp_settings
11281
11282         def _deallocate_config(self, settings):
11283                 self._config_pool[settings["ROOT"]].append(settings)
11284
11285         def _main_loop(self):
11286
11287                 # Only allow 1 job max if a restart is scheduled
11288                 # due to a portage update.
11289                 if self._is_restart_scheduled() or \
11290                         self._opts_no_background.intersection(self.myopts):
11291                         self._set_max_jobs(1)
11292
11293                 merge_queue = self._task_queues.merge
11294
11295                 while self._schedule():
11296                         if self._poll_event_handlers:
11297                                 self._poll_loop()
11298
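                      # No new packages are being scheduled at this point; keep
                      # polling until the remaining jobs and queued merges finish.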
11299                 while True:
11300                         self._schedule()
11301                         if not (self._jobs or merge_queue):
11302                                 break
11303                         if self._poll_event_handlers:
11304                                 self._poll_loop()
11305
11306         def _keep_scheduling(self):
11307                 return bool(self._pkg_queue and \
11308                         not (self._failed_pkgs and not self._build_opts.fetchonly))
11309
11310         def _schedule_tasks(self):
11311
11312                 # When the number of jobs drops to zero, process all waiting merges.
11313                 if not self._jobs and self._merge_wait_queue:
11314                         for task in self._merge_wait_queue:
11315                                 task.addExitListener(self._merge_wait_exit_handler)
11316                                 self._task_queues.merge.add(task)
11317                         self._status_display.merges = len(self._task_queues.merge)
11318                         self._merge_wait_scheduled.extend(self._merge_wait_queue)
11319                         del self._merge_wait_queue[:]
11320
11321                 self._schedule_tasks_imp()
11322                 self._status_display.display()
11323
11324                 state_change = 0
11325                 for q in self._task_queues.values():
11326                         if q.schedule():
11327                                 state_change += 1
11328
11329                 # Cancel prefetchers if they're the only reason
11330                 # the main poll loop is still running.
11331                 if self._failed_pkgs and not self._build_opts.fetchonly and \
11332                         not (self._jobs or self._task_queues.merge) and \
11333                         self._task_queues.fetch:
11334                         self._task_queues.fetch.clear()
11335                         state_change += 1
11336
11337                 if state_change:
11338                         self._schedule_tasks_imp()
11339                         self._status_display.display()
11340
11341                 return self._keep_scheduling()
11342
11343         def _job_delay(self):
11344                 """
11345                 @rtype: bool
11346                 @returns: True if job scheduling should be delayed, False otherwise.
11347                 """
11348
11349                 if self._jobs and self._max_load is not None:
11350
11351                         current_time = time.time()
11352
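                              # Back off as the number of running jobs grows,
                              # capped at _job_delay_max.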
11353                         delay = self._job_delay_factor * self._jobs ** self._job_delay_exp
11354                         if delay > self._job_delay_max:
11355                                 delay = self._job_delay_max
11356                         if (current_time - self._previous_job_start_time) < delay:
11357                                 return True
11358
11359                 return False
11360
11361         def _schedule_tasks_imp(self):
11362                 """
11363                 @rtype: bool
11364                 @returns: True if state changed, False otherwise.
11365                 """
11366
11367                 state_change = 0
11368
11369                 while True:
11370
11371                         if not self._keep_scheduling():
11372                                 return bool(state_change)
11373
11374                         if self._choose_pkg_return_early or \
11375                                 self._merge_wait_scheduled or \
11376                                 (self._jobs and self._unsatisfied_system_deps) or \
11377                                 not self._can_add_job() or \
11378                                 self._job_delay():
11379                                 return bool(state_change)
11380
11381                         pkg = self._choose_pkg()
11382                         if pkg is None:
11383                                 return bool(state_change)
11384
11385                         state_change += 1
11386
11387                         if not pkg.installed:
11388                                 self._pkg_count.curval += 1
11389
11390                         task = self._task(pkg)
11391
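                              # Tasks for packages that are already installed don't
                              # need a build job; wrap them in a PackageMerge and
                              # add them straight to the merge queue.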
11392                         if pkg.installed:
11393                                 merge = PackageMerge(merge=task)
11394                                 merge.addExitListener(self._merge_exit)
11395                                 self._task_queues.merge.add(merge)
11396
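                              # Built (binary) packages are scheduled as jobs;
                              # _extract_exit handles the completed extraction.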
11397                         elif pkg.built:
11398                                 self._jobs += 1
11399                                 self._previous_job_start_time = time.time()
11400                                 self._status_display.running = self._jobs
11401                                 task.addExitListener(self._extract_exit)
11402                                 self._task_queues.jobs.add(task)
11403
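                              # Everything else is an ebuild that must be built;
                              # _build_exit handles the completed build.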
11404                         else:
11405                                 self._jobs += 1
11406                                 self._previous_job_start_time = time.time()
11407                                 self._status_display.running = self._jobs
11408                                 task.addExitListener(self._build_exit)
11409                                 self._task_queues.jobs.add(task)
11410
11411                 return bool(state_change)
11412
11413         def _task(self, pkg):
11414
11415                 pkg_to_replace = None
11416                 if pkg.operation != "uninstall":
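                              # For anything other than an uninstall, look up the
                              # currently installed package in the same slot (if any)
                              # so it can be passed to the task as the package
                              # being replaced.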
11417                         vardb = pkg.root_config.trees["vartree"].dbapi
11418                         previous_cpv = vardb.match(pkg.slot_atom)
11419                         if previous_cpv:
11420                                 previous_cpv = previous_cpv.pop()
11421                                 pkg_to_replace = self._pkg(previous_cpv,
11422                                         "installed", pkg.root_config, installed=True)
11423
11424                 task = MergeListItem(args_set=self._args_set,
11425                         background=self._background, binpkg_opts=self._binpkg_opts,
11426                         build_opts=self._build_opts,
11427                         config_pool=self._ConfigPool(pkg.root,
11428                         self._allocate_config, self._deallocate_config),
11429                         emerge_opts=self.myopts,
11430                         find_blockers=self._find_blockers(pkg), logger=self._logger,
11431                         mtimedb=self._mtimedb, pkg=pkg, pkg_count=self._pkg_count.copy(),
11432                         pkg_to_replace=pkg_to_replace,
11433                         prefetcher=self._prefetchers.get(pkg),
11434                         scheduler=self._sched_iface,
11435                         settings=self._allocate_config(pkg.root),
11436                         statusMessage=self._status_msg,
11437                         world_atom=self._world_atom)
11438
11439                 return task
11440
11441         def _failed_pkg_msg(self, failed_pkg, action, preposition):
11442                 pkg = failed_pkg.pkg
11443                 msg = "%s to %s %s" % \
11444                         (bad("Failed"), action, colorize("INFORM", pkg.cpv))
11445                 if pkg.root != "/":
11446                         msg += " %s %s" % (preposition, pkg.root)
11447
11448                 log_path = self._locate_failure_log(failed_pkg)
11449                 if log_path is not None:
11450                         msg += ", Log file:"
11451                 self._status_msg(msg)
11452
11453                 if log_path is not None:
11454                         self._status_msg(" '%s'" % (colorize("INFORM", log_path),))
11455
11456         def _status_msg(self, msg):
11457                 """
11458                 Display a brief status message (no newlines) in the status display.
11459                 This is called by tasks to provide feedback to the user. It
11460                 delegates the responsibility of generating \r and \n control
11461                 characters to the status display, so that lines are created or
11462                 erased when necessary and appropriate.
11463
11464                 @type msg: str
11465                 @param msg: a brief status message (no newlines allowed)
11466                 """
11467                 if not self._background:
11468                         writemsg_level("\n")
11469                 self._status_display.displayMessage(msg)
11470
11471         def _save_resume_list(self):
11472                 """
11473                 Do this before verifying the ebuild Manifests since it might
11474                 be possible for the user to use --resume --skipfirst to get past
11475                 a non-essential package with a broken digest.
11476                 """
11477                 mtimedb = self._mtimedb
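                      # Only packages scheduled for merge belong in the resume
                      # list; uninstalls and blockers are excluded.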
11478                 mtimedb["resume"]["mergelist"] = [list(x) \
11479                         for x in self._mergelist \
11480                         if isinstance(x, Package) and x.operation == "merge"]
11481
11482                 mtimedb.commit()
11483
11484         def _calc_resume_list(self):
11485                 """
11486                 Use the current resume list to calculate a new one,
11487                 dropping any packages with unsatisfied deps.
11488                 @rtype: bool
11489                 @returns: True if successful, False otherwise.
11490                 """
11491                 print colorize("GOOD", "*** Resuming merge...")
11492
11493                 if self._show_list():
11494                         if "--tree" in self.myopts:
11495                                 portage.writemsg_stdout("\n" + \
11496                                         darkgreen("These are the packages that " + \
11497                                         "would be merged, in reverse order:\n\n"))
11498
11499                         else:
11500                                 portage.writemsg_stdout("\n" + \
11501                                         darkgreen("These are the packages that " + \
11502                                         "would be merged, in order:\n\n"))
11503
11504                 show_spinner = "--quiet" not in self.myopts and \
11505                         "--nodeps" not in self.myopts
11506
11507                 if show_spinner:
11508                         print "Calculating dependencies  ",
11509
11510                 myparams = create_depgraph_params(self.myopts, None)
11511                 success = False
11512                 e = None
11513                 try:
11514                         success, mydepgraph, dropped_tasks = resume_depgraph(
11515                                 self.settings, self.trees, self._mtimedb, self.myopts,
11516                                 myparams, self._spinner)
11517                 except depgraph.UnsatisfiedResumeDep, exc:
11518                         # rename variable to avoid python-3.0 error:
11519                         # SyntaxError: can not delete variable 'e' referenced in nested
11520                         #              scope
11521                         e = exc
11522                         mydepgraph = e.depgraph
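                              # No tasks are dropped in this case; the unsatisfied
                              # dependencies are reported below via
                              # unsatisfied_resume_dep_msg.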
11523                         dropped_tasks = set()
11524
11525                 if show_spinner:
11526                         print "\b\b... done!"
11527
11528                 if e is not None:
11529                         def unsatisfied_resume_dep_msg():
11530                                 mydepgraph.display_problems()
11531                                 out = portage.output.EOutput()
11532                                 out.eerror("One or more packages are either masked or " + \
11533                                         "have missing dependencies:")
11534                                 out.eerror("")
11535                                 indent = "  "
11536                                 show_parents = set()
11537                                 for dep in e.value:
11538                                         if dep.parent in show_parents:
11539                                                 continue
11540                                         show_parents.add(dep.parent)
11541                                         if dep.atom is None:
11542                                                 out.eerror(indent + "Masked package:")
11543                                                 out.eerror(2 * indent + str(dep.parent))
11544                                                 out.eerror("")
11545                                         else:
11546                                                 out.eerror(indent + str(dep.atom) + " pulled in by:")
11547                                                 out.eerror(2 * indent + str(dep.parent))
11548                                                 out.eerror("")
11549                                 msg = "The resume list contains packages " + \
11550                                         "that are either masked or have " + \
11551                                         "unsatisfied dependencies. " + \
11552                                         "Please restart/continue " + \
11553                                         "the operation manually, or use --skipfirst " + \
11554                                         "to skip the first package in the list and " + \
11555                                         "any other packages that may be " + \
11556                                         "masked or have missing dependencies."
11557                                 for line in textwrap.wrap(msg, 72):
11558                                         out.eerror(line)
11559                         self._post_mod_echo_msgs.append(unsatisfied_resume_dep_msg)
11560                         return False
11561
11562                 if success and self._show_list():
11563                         mylist = mydepgraph.altlist()
11564                         if mylist:
11565                                 if "--tree" in self.myopts:
11566                                         mylist.reverse()
11567                                 mydepgraph.display(mylist, favorites=self._favorites)
11568
11569                 if not success:
11570                         self._post_mod_echo_msgs.append(mydepgraph.display_problems)
11571                         return False
11572                 mydepgraph.display_problems()
11573
11574                 mylist = mydepgraph.altlist()
11575                 mydepgraph.break_refs(mylist)
11576                 mydepgraph.break_refs(dropped_tasks)
11577                 self._mergelist = mylist
11578                 self._set_digraph(mydepgraph.schedulerGraph())
11579
11580                 msg_width = 75
11581                 for task in dropped_tasks:
11582                         if not (isinstance(task, Package) and task.operation == "merge"):
11583                                 continue
11584                         pkg = task
11585                         msg = "emerge --keep-going:" + \
11586                                 " %s" % (pkg.cpv,)
11587                         if pkg.root != "/":
11588                                 msg += " for %s" % (pkg.root,)
11589                         msg += " dropped due to unsatisfied dependency."
11590                         for line in textwrap.wrap(msg, msg_width):
11591                                 eerror(line, phase="other", key=pkg.cpv)
11592                         settings = self.pkgsettings[pkg.root]
11593                         # Ensure that log collection from $T is disabled inside
11594                         # elog_process(), since any logs that might exist are
11595                         # not valid here.
11596                         settings.pop("T", None)
11597                         portage.elog.elog_process(pkg.cpv, settings)
11598                         self._failed_pkgs_all.append(self._failed_pkg(pkg=pkg))
11599
11600                 return True
11601
11602         def _show_list(self):
11603                 myopts = self.myopts
11604                 if "--quiet" not in myopts and \
11605                         ("--ask" in myopts or "--tree" in myopts or \
11606                         "--verbose" in myopts):
11607                         return True
11608                 return False
11609
11610         def _world_atom(self, pkg):
11611                 """
11612                 Add the package to the world file, but only if
11613                 it's supposed to be added. Otherwise, do nothing.
11614                 """
11615
11616                 if set(("--buildpkgonly", "--fetchonly",
11617                         "--fetch-all-uri",
11618                         "--oneshot", "--onlydeps",
11619                         "--pretend")).intersection(self.myopts):
11620                         return
11621
11622                 if pkg.root != self.target_root:
11623                         return
11624
11625                 args_set = self._args_set
11626                 if not args_set.findAtomForPackage(pkg):
11627                         return
11628
11629                 logger = self._logger
11630                 pkg_count = self._pkg_count
11631                 root_config = pkg.root_config
11632                 world_set = root_config.sets["world"]
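                      # Lock the world file when the set supports locking, so that
                      # concurrent processes don't overwrite each other's changes.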
11633                 world_locked = False
11634                 if hasattr(world_set, "lock"):
11635                         world_set.lock()
11636                         world_locked = True
11637
11638                 try:
11639                         if hasattr(world_set, "load"):
11640                                 world_set.load() # maybe it's changed on disk
11641
11642                         atom = create_world_atom(pkg, args_set, root_config)
11643                         if atom:
11644                                 if hasattr(world_set, "add"):
11645                                         self._status_msg(('Recording %s in "world" ' + \
11646                                                 'favorites file...') % atom)
11647                                         logger.log(" === (%s of %s) Updating world file (%s)" % \
11648                                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv))
11649                                         world_set.add(atom)
11650                                 else:
11651                                         writemsg_level('\n!!! Unable to record %s in "world"\n' % \
11652                                                 (atom,), level=logging.WARN, noiselevel=-1)
11653                 finally:
11654                         if world_locked:
11655                                 world_set.unlock()
11656
11657         def _pkg(self, cpv, type_name, root_config, installed=False):
11658                 """
11659                 Get a package instance from the cache, or create a new
11660                 one if necessary. Raises KeyError from aux_get if it
11661                 fails for some reason (package does not exist or is
11662                 corrupt).
11663                 """
11664                 operation = "merge"
11665                 if installed:
11666                         operation = "nomerge"
11667
11668                 if self._digraph is not None:
11669                         # Reuse existing instance when available.
11670                         pkg = self._digraph.get(
11671                                 (type_name, root_config.root, cpv, operation))
11672                         if pkg is not None:
11673                                 return pkg
11674
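                      # No cached instance is available; construct a new Package
                      # from the appropriate tree's metadata.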
11675                 tree_type = depgraph.pkg_tree_map[type_name]
11676                 db = root_config.trees[tree_type].dbapi
11677                 db_keys = list(self.trees[root_config.root][
11678                         tree_type].dbapi._aux_cache_keys)
11679                 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
11680                 pkg = Package(cpv=cpv, metadata=metadata,
11681                         root_config=root_config, installed=installed)
11682                 if type_name == "ebuild":
11683                         settings = self.pkgsettings[root_config.root]
11684                         settings.setcpv(pkg)
11685                         pkg.metadata["USE"] = settings["PORTAGE_USE"]
11686                         pkg.metadata['CHOST'] = settings.get('CHOST', '')
11687
11688                 return pkg
11689
11690 class MetadataRegen(PollScheduler):
11691
11692         def __init__(self, portdb, cp_iter=None, consumer=None,
11693                 max_jobs=None, max_load=None):
11694                 PollScheduler.__init__(self)
11695                 self._portdb = portdb
11696                 self._global_cleanse = False
11697                 if cp_iter is None:
11698                         cp_iter = self._iter_every_cp()
11699                         # We can globally cleanse stale cache only if we
11700                         # iterate over every single cp.
11701                         self._global_cleanse = True
11702                 self._cp_iter = cp_iter
11703                 self._consumer = consumer
11704
11705                 if max_jobs is None:
11706                         max_jobs = 1
11707
11708                 self._max_jobs = max_jobs
11709                 self._max_load = max_load
11710                 self._sched_iface = self._sched_iface_class(
11711                         register=self._register,
11712                         schedule=self._schedule_wait,
11713                         unregister=self._unregister)
11714
11715                 self._valid_pkgs = set()
11716                 self._cp_set = set()
11717                 self._process_iter = self._iter_metadata_processes()
11718                 self.returncode = os.EX_OK
11719                 self._error_count = 0
11720
11721         def _iter_every_cp(self):
11722                 every_cp = self._portdb.cp_all()
11723                 every_cp.sort(reverse=True)
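                      # Pop from the end of the reverse-sorted list so that cp
                      # values are yielded in ascending order while the list
                      # shrinks in place.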
11724                 try:
11725                         while True:
11726                                 yield every_cp.pop()
11727                 except IndexError:
11728                         pass
11729
11730         def _iter_metadata_processes(self):
11731                 portdb = self._portdb
11732                 valid_pkgs = self._valid_pkgs
11733                 cp_set = self._cp_set
11734                 consumer = self._consumer
11735
11736                 for cp in self._cp_iter:
11737                         cp_set.add(cp)
11738                         portage.writemsg_stdout("Processing %s\n" % cp)
11739                         cpv_list = portdb.cp_list(cp)
11740                         for cpv in cpv_list:
11741                                 valid_pkgs.add(cpv)
11742                                 ebuild_path, repo_path = portdb.findname2(cpv)
11743                                 metadata, st, emtime = portdb._pull_valid_cache(
11744                                         cpv, ebuild_path, repo_path)
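                                      # A valid cache entry means no ebuild process
                                      # needs to be spawned; just pass the cached
                                      # metadata to the consumer.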
11745                                 if metadata is not None:
11746                                         if consumer is not None:
11747                                                 consumer(cpv, ebuild_path,
11748                                                         repo_path, metadata)
11749                                         continue
11750
11751                                 yield EbuildMetadataPhase(cpv=cpv, ebuild_path=ebuild_path,
11752                                         ebuild_mtime=emtime,
11753                                         metadata_callback=portdb._metadata_callback,
11754                                         portdb=portdb, repo_path=repo_path,
11755                                         settings=portdb.doebuild_settings)
11756
11757         def run(self):
11758
11759                 portdb = self._portdb
11760                 from portage.cache.cache_errors import CacheError
11761                 dead_nodes = {}
11762
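                      # Schedule metadata regeneration processes until the
                      # iterator is exhausted, then wait for outstanding jobs
                      # to finish.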
11763                 while self._schedule():
11764                         self._poll_loop()
11765
11766                 while self._jobs:
11767                         self._poll_loop()
11768
11769                 if self._global_cleanse:
11770                         for mytree in portdb.porttrees:
11771                                 try:
11772                                         dead_nodes[mytree] = set(portdb.auxdb[mytree].iterkeys())
11773                                 except CacheError, e:
11774                                         portage.writemsg("Error listing cache entries for " + \
11775                                                 "'%s': %s, continuing...\n" % (mytree, e),
11776                                                 noiselevel=-1)
11777                                         del e
11778                                         dead_nodes = None
11779                                         break
11780                 else:
11781                         cp_set = self._cp_set
11782                         cpv_getkey = portage.cpv_getkey
11783                         for mytree in portdb.porttrees:
11784                                 try:
11785                                         dead_nodes[mytree] = set(cpv for cpv in \
11786                                                 portdb.auxdb[mytree].iterkeys() \
11787                                                 if cpv_getkey(cpv) in cp_set)
11788                                 except CacheError, e:
11789                                         portage.writemsg("Error listing cache entries for " + \
11790                                                 "'%s': %s, continuing...\n" % (mytree, e),
11791                                                 noiselevel=-1)
11792                                         del e
11793                                         dead_nodes = None
11794                                         break
11795
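                      # Cache entries that still have a corresponding ebuild are
                      # not stale; discard them before deleting whatever remains
                      # from auxdb.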
11796                 if dead_nodes:
11797                         for y in self._valid_pkgs:
11798                                 for mytree in portdb.porttrees:
11799                                         if portdb.findname2(y, mytree=mytree)[0]:
11800                                                 dead_nodes[mytree].discard(y)
11801
11802                         for mytree, nodes in dead_nodes.iteritems():
11803                                 auxdb = portdb.auxdb[mytree]
11804                                 for y in nodes:
11805                                         try:
11806                                                 del auxdb[y]
11807                                         except (KeyError, CacheError):
11808                                                 pass
11809
11810         def _schedule_tasks(self):
11811                 """
11812                 @rtype: bool
11813                 @returns: True if there may be remaining tasks to schedule,
11814                         False otherwise.
11815                 """
11816                 while self._can_add_job():
11817                         try:
11818                                 metadata_process = self._process_iter.next()
11819                         except StopIteration:
11820                                 return False
11821
11822                         self._jobs += 1
11823                         metadata_process.scheduler = self._sched_iface
11824                         metadata_process.addExitListener(self._metadata_exit)
11825                         metadata_process.start()
11826                 return True
11827
11828         def _metadata_exit(self, metadata_process):
11829                 self._jobs -= 1
11830                 if metadata_process.returncode != os.EX_OK:
11831                         self.returncode = 1
11832                         self._error_count += 1
11833                         self._valid_pkgs.discard(metadata_process.cpv)
11834                         portage.writemsg("Error processing %s, continuing...\n" % \
11835                                 (metadata_process.cpv,), noiselevel=-1)
11836
11837                 if self._consumer is not None:
11838                         # On failure, still notify the consumer (in this case the metadata
11839                         # argument is None).
11840                         self._consumer(metadata_process.cpv,
11841                                 metadata_process.ebuild_path,
11842                                 metadata_process.repo_path,
11843                                 metadata_process.metadata)
11844
11845                 self._schedule()
11846
11847 class UninstallFailure(portage.exception.PortageException):
11848         """
11849         An instance of this class is raised by unmerge() when
11850         an uninstallation fails.
11851         """
11852         status = 1
11853         def __init__(self, *pargs):
11854                 portage.exception.PortageException.__init__(self, pargs)
11855                 if pargs:
11856                         self.status = pargs[0]
11857
11858 def unmerge(root_config, myopts, unmerge_action,
11859         unmerge_files, ldpath_mtimes, autoclean=0,
11860         clean_world=1, clean_delay=1, ordered=0, raise_on_error=0,
11861         scheduler=None, writemsg_level=portage.util.writemsg_level):
11862
11863         if clean_world:
11864                 clean_world = myopts.get('--deselect') != 'n'
11865         quiet = "--quiet" in myopts
11866         settings = root_config.settings
11867         sets = root_config.sets
11868         vartree = root_config.trees["vartree"]
11869         candidate_catpkgs=[]
11870         global_unmerge=0
11871         xterm_titles = "notitles" not in settings.features
11872         out = portage.output.EOutput()
11873         pkg_cache = {}
11874         db_keys = list(vartree.dbapi._aux_cache_keys)
11875
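              # Memoizing helper: build Package instances for installed cpvs on
              # demand and cache them for reuse within this function.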
11876         def _pkg(cpv):
11877                 pkg = pkg_cache.get(cpv)
11878                 if pkg is None:
11879                         pkg = Package(cpv=cpv, installed=True,
11880                                 metadata=izip(db_keys, vartree.dbapi.aux_get(cpv, db_keys)),
11881                                 root_config=root_config,
11882                                 type_name="installed")
11883                         pkg_cache[cpv] = pkg
11884                 return pkg
11885
11886         vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
11887         try:
11888                 # At least the parent needs to exist for the lock file.
11889                 portage.util.ensure_dirs(vdb_path)
11890         except portage.exception.PortageException:
11891                 pass
11892         vdb_lock = None
11893         try:
11894                 if os.access(vdb_path, os.W_OK):
11895                         vdb_lock = portage.locks.lockdir(vdb_path)
11896                 realsyslist = sets["system"].getAtoms()
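                      # Expand the system set: a virtual's provider is only
                      # treated as part of the system set when it is the single
                      # installed provider of that virtual.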
11897                 syslist = []
11898                 for x in realsyslist:
11899                         mycp = portage.dep_getkey(x)
11900                         if mycp in settings.getvirtuals():
11901                                 providers = []
11902                                 for provider in settings.getvirtuals()[mycp]:
11903                                         if vartree.dbapi.match(provider):
11904                                                 providers.append(provider)
11905                                 if len(providers) == 1:
11906                                         syslist.extend(providers)
11907                         else:
11908                                 syslist.append(mycp)
11909         
11910                 mysettings = portage.config(clone=settings)
11911         
11912                 if not unmerge_files:
11913                         if unmerge_action == "unmerge":
11914                                 print
11915                                 print bold("emerge unmerge") + " can only be used with specific package names"
11916                                 print
11917                                 return 0
11918                         else:
11919                                 global_unmerge = 1
11920         
11921                 localtree = vartree
11922                 # process all arguments and add all
11923                 # valid db entries to candidate_catpkgs
11924                 if global_unmerge:
11925                         if not unmerge_files:
11926                                 candidate_catpkgs.extend(vartree.dbapi.cp_all())
11927                 else:
11928                         #we've got command-line arguments
11929                         if not unmerge_files:
11930                                 print "\nNo packages to unmerge have been provided.\n"
11931                                 return 0
11932                         for x in unmerge_files:
11933                                 arg_parts = x.split('/')
11934                                 if x[0] not in [".","/"] and \
11935                                         arg_parts[-1][-7:] != ".ebuild":
11936                                         #possible cat/pkg or dep; treat as such
11937                                         candidate_catpkgs.append(x)
11938                                 elif unmerge_action in ["prune","clean"]:
11939                                         print "\n!!! Prune and clean do not accept individual" + \
11940                                                 " ebuilds as arguments;\n    skipping.\n"
11941                                         continue
11942                                 else:
11943                                         # it appears that the user is specifying an installed
11944                                         # ebuild and we're in "unmerge" mode, so it's ok.
11945                                         if not os.path.exists(x):
11946                                                 print "\n!!! The path '"+x+"' doesn't exist.\n"
11947                                                 return 0
11948         
11949                                         absx   = os.path.abspath(x)
11950                                         sp_absx = absx.split("/")
11951                                         if sp_absx[-1][-7:] == ".ebuild":
11952                                                 del sp_absx[-1]
11953                                                 absx = "/".join(sp_absx)
11954         
11955                                         sp_absx_len = len(sp_absx)
11956         
11957                                         vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
11958                                         vdb_len  = len(vdb_path)
11959         
11960                                         sp_vdb     = vdb_path.split("/")
11961                                         sp_vdb_len = len(sp_vdb)
11962         
11963                                         if not os.path.exists(absx+"/CONTENTS"):
11964                                                 print "!!! Not a valid db dir: "+str(absx)
11965                                                 return 0
11966         
11967                                         if sp_absx_len <= sp_vdb_len:
11968                                                 # The path is shorter... so it can't be inside the vdb.
11969                                                 print sp_absx
11970                                                 print absx
11971                                                 print "\n!!!",x,"cannot be inside "+ \
11972                                                         vdb_path+"; aborting.\n"
11973                                                 return 0
11974         
11975                                         for idx in range(0,sp_vdb_len):
11976                                                 if idx >= sp_absx_len or sp_vdb[idx] != sp_absx[idx]:
11977                                                         print sp_absx
11978                                                         print absx
11979                                                         print "\n!!!", x, "is not inside "+\
11980                                                                 vdb_path+"; aborting.\n"
11981                                                         return 0
11982         
11983                                         print "="+"/".join(sp_absx[sp_vdb_len:])
11984                                         candidate_catpkgs.append(
11985                                                 "="+"/".join(sp_absx[sp_vdb_len:]))
11986         
11987                 newline=""
11988                 if (not "--quiet" in myopts):
11989                         newline="\n"
11990                 if settings["ROOT"] != "/":
11991                         writemsg_level(darkgreen(newline+ \
11992                                 ">>> Using system located in ROOT tree %s\n" % \
11993                                 settings["ROOT"]))
11994
11995                 if (("--pretend" in myopts) or ("--ask" in myopts)) and \
11996                         not ("--quiet" in myopts):
11997                         writemsg_level(darkgreen(newline+\
11998                                 ">>> These are the packages that would be unmerged:\n"))
11999
12000                 # Preservation of order is required for --depclean and --prune so
12001                 # that dependencies are respected. Use all_selected to eliminate
12002                 # duplicate packages since the same package may be selected by
12003                 # multiple atoms.
12004                 pkgmap = []
12005                 all_selected = set()
12006                 for x in candidate_catpkgs:
12007                         # cycle through all our candidate deps and determine
12008                         # what will and will not get unmerged
12009                         try:
12010                                 mymatch = vartree.dbapi.match(x)
12011                         except portage.exception.AmbiguousPackageName, errpkgs:
12012                                 print "\n\n!!! The short ebuild name \"" + \
12013                                         x + "\" is ambiguous.  Please specify"
12014                                 print "!!! one of the following fully-qualified " + \
12015                                         "ebuild names instead:\n"
12016                                 for i in errpkgs[0]:
12017                                         print "    " + green(i)
12018                                 print
12019                                 sys.exit(1)
12020         
12021                         if not mymatch and x[0] not in "<>=~":
12022                                 mymatch = localtree.dep_match(x)
12023                         if not mymatch:
12024                                 portage.writemsg("\n--- Couldn't find '%s' to %s.\n" % \
12025                                         (x, unmerge_action), noiselevel=-1)
12026                                 continue
12027
12028                         pkgmap.append(
12029                                 {"protected": set(), "selected": set(), "omitted": set()})
12030                         mykey = len(pkgmap) - 1
12031                         if unmerge_action=="unmerge":
12032                                         for y in mymatch:
12033                                                 if y not in all_selected:
12034                                                         pkgmap[mykey]["selected"].add(y)
12035                                                         all_selected.add(y)
12036                         elif unmerge_action == "prune":
12037                                 if len(mymatch) == 1:
12038                                         continue
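                                      # Track the best installed version (protected);
                                      # every other matched version becomes a prune
                                      # candidate.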
12039                                 best_version = mymatch[0]
12040                                 best_slot = vartree.getslot(best_version)
12041                                 best_counter = vartree.dbapi.cpv_counter(best_version)
12042                                 for mypkg in mymatch[1:]:
12043                                         myslot = vartree.getslot(mypkg)
12044                                         mycounter = vartree.dbapi.cpv_counter(mypkg)
12045                                         if (myslot == best_slot and mycounter > best_counter) or \
12046                                                 mypkg == portage.best([mypkg, best_version]):
12047                                                 if myslot == best_slot:
12048                                                         if mycounter < best_counter:
12049                                                                 # On slot collision, keep the one with the
12050                                                                 # highest counter since it is the most
12051                                                                 # recently installed.
12052                                                                 continue
12053                                                 best_version = mypkg
12054                                                 best_slot = myslot
12055                                                 best_counter = mycounter
12056                                 pkgmap[mykey]["protected"].add(best_version)
12057                                 pkgmap[mykey]["selected"].update(mypkg for mypkg in mymatch \
12058                                         if mypkg != best_version and mypkg not in all_selected)
12059                                 all_selected.update(pkgmap[mykey]["selected"])
12060                         else:
12061                                 # unmerge_action == "clean"
12062                                 slotmap={}
12063                                 for mypkg in mymatch:
12064                                         if unmerge_action == "clean":
12065                                                 myslot = localtree.getslot(mypkg)
12066                                         else:
12067                                                 # since we're pruning, we don't care about slots
12068                                                 # and put all the pkgs in together
12069                                                 myslot = 0
12070                                         if myslot not in slotmap:
12071                                                 slotmap[myslot] = {}
12072                                         slotmap[myslot][localtree.dbapi.cpv_counter(mypkg)] = mypkg
12073
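                                      # Add every installed version of this cp to the
                                      # slot map so that the most recently merged
                                      # package in each slot is protected below.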
12074                                 for mypkg in vartree.dbapi.cp_list(
12075                                         portage.dep_getkey(mymatch[0])):
12076                                         myslot = vartree.getslot(mypkg)
12077                                         if myslot not in slotmap:
12078                                                 slotmap[myslot] = {}
12079                                         slotmap[myslot][vartree.dbapi.cpv_counter(mypkg)] = mypkg
12080
12081                                 for myslot in slotmap:
12082                                         counterkeys = slotmap[myslot].keys()
12083                                         if not counterkeys:
12084                                                 continue
12085                                         counterkeys.sort()
12086                                         pkgmap[mykey]["protected"].add(
12087                                                 slotmap[myslot][counterkeys[-1]])
12088                                         del counterkeys[-1]
12089
12090                                         for counter in counterkeys[:]:
12091                                                 mypkg = slotmap[myslot][counter]
12092                                                 if mypkg not in mymatch:
12093                                                         counterkeys.remove(counter)
12094                                                         pkgmap[mykey]["protected"].add(
12095                                                                 slotmap[myslot][counter])
12096
12097                                         #be pretty and get them in order of merge:
12098                                         for ckey in counterkeys:
12099                                                 mypkg = slotmap[myslot][ckey]
12100                                                 if mypkg not in all_selected:
12101                                                         pkgmap[mykey]["selected"].add(mypkg)
12102                                                         all_selected.add(mypkg)
12103                                         # ok, now the last-merged package
12104                                         # is protected, and the rest are selected
12105                 numselected = len(all_selected)
12106                 if global_unmerge and not numselected:
12107                         portage.writemsg_stdout("\n>>> No outdated packages were found on your system.\n")
12108                         return 0
12109         
12110                 if not numselected:
12111                         portage.writemsg_stdout(
12112                                 "\n>>> No packages selected for removal by " + \
12113                                 unmerge_action + "\n")
12114                         return 0
12115         finally:
12116                 if vdb_lock:
12117                         vartree.dbapi.flush_cache()
12118                         portage.locks.unlockdir(vdb_lock)
12119         
12120         from portage.sets.base import EditablePackageSet
12121         
12122         # generate a list of package sets that are directly or indirectly listed in "world",
12123         # as there is no persistent list of "installed" sets
12124         installed_sets = ["world"]
12125         stop = False
12126         pos = 0
12127         while not stop:
12128                 stop = True
12129                 pos = len(installed_sets)
12130                 for s in installed_sets[pos - 1:]:
12131                         if s not in sets:
12132                                 continue
12133                         candidates = [x[len(SETPREFIX):] for x in sets[s].getNonAtoms() if x.startswith(SETPREFIX)]
12134                         if candidates:
12135                                 stop = False
12136                                 installed_sets += candidates
12137         installed_sets = [x for x in installed_sets if x not in root_config.setconfig.active]
12138         del stop, pos
12139
12140         # we don't want to unmerge packages that are still listed in user-editable package sets
12141         # listed in "world" as they would be remerged on the next update of "world" or the 
12142         # relevant package sets.
12143         unknown_sets = set()
12144         for cp in xrange(len(pkgmap)):
12145                 for cpv in pkgmap[cp]["selected"].copy():
12146                         try:
12147                                 pkg = _pkg(cpv)
12148                         except KeyError:
12149                                 # It could have been uninstalled
12150                                 # by a concurrent process.
12151                                 continue
12152
12153                         if unmerge_action != "clean" and \
12154                                 root_config.root == "/" and \
12155                                 portage.match_from_list(
12156                                 portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
12157                                 msg = ("Not unmerging package %s since there is no valid " + \
12158                                 "reason for portage to unmerge itself.") % (pkg.cpv,)
12159                                 for line in textwrap.wrap(msg, 75):
12160                                         out.eerror(line)
12161                                 # adjust pkgmap so the display output is correct
12162                                 pkgmap[cp]["selected"].remove(cpv)
12163                                 all_selected.remove(cpv)
12164                                 pkgmap[cp]["protected"].add(cpv)
12165                                 continue
12166
12167                         parents = []
12168                         for s in installed_sets:
12169                                 # skip sets that the user requested to unmerge, and skip world 
12170                                 # unless we're unmerging a package set (as the package would be 
12171                                 # removed from "world" later on)
12172                                 if s in root_config.setconfig.active or (s == "world" and not root_config.setconfig.active):
12173                                         continue
12174
12175                                 if s not in sets:
12176                                         if s in unknown_sets:
12177                                                 continue
12178                                         unknown_sets.add(s)
12179                                         out = portage.output.EOutput()
12180                                         out.eerror(("Unknown set '@%s' in " + \
12181                                                 "%svar/lib/portage/world_sets") % \
12182                                                 (s, root_config.root))
12183                                         continue
12184
12185                                 # only check instances of EditablePackageSet as other classes are generally used for
12186                                 # special purposes and can be ignored here (and are usually generated dynamically, so the
12187                                 # user can't do much about them anyway)
12188                                 if isinstance(sets[s], EditablePackageSet):
12189
12190                                         # This is derived from a snippet of code in the
12191                                         # depgraph._iter_atoms_for_pkg() method.
12192                                         for atom in sets[s].iterAtomsForPackage(pkg):
12193                                                 inst_matches = vartree.dbapi.match(atom)
12194                                                 inst_matches.reverse() # descending order
12195                                                 higher_slot = None
12196                                                 for inst_cpv in inst_matches:
12197                                                         try:
12198                                                                 inst_pkg = _pkg(inst_cpv)
12199                                                         except KeyError:
12200                                                                 # It could have been uninstalled
12201                                                                 # by a concurrent process.
12202                                                                 continue
12203
12204                                                         if inst_pkg.cp != atom.cp:
12205                                                                 continue
12206                                                         if pkg >= inst_pkg:
12207                                                                 # This is descending order, and we're not
12208                                                                 # interested in any versions <= the given pkg.
12209                                                                 break
12210                                                         if pkg.slot_atom != inst_pkg.slot_atom:
12211                                                                 higher_slot = inst_pkg
12212                                                                 break
12213                                                 if higher_slot is None:
12214                                                         parents.append(s)
12215                                                         break
12216                         if parents:
12217                                 #print colorize("WARN", "Package %s is going to be unmerged," % cpv)
12218                                 #print colorize("WARN", "but still listed in the following package sets:")
12219                                 #print "    %s\n" % ", ".join(parents)
12220                                 print colorize("WARN", "Not unmerging package %s as it is" % cpv)
12221                                 print colorize("WARN", "still referenced by the following package sets:")
12222                                 print "    %s\n" % ", ".join(parents)
12223                                 # adjust pkgmap so the display output is correct
12224                                 pkgmap[cp]["selected"].remove(cpv)
12225                                 all_selected.remove(cpv)
12226                                 pkgmap[cp]["protected"].add(cpv)
12227         
12228         del installed_sets
12229
12230         numselected = len(all_selected)
12231         if not numselected:
12232                 writemsg_level(
12233                         "\n>>> No packages selected for removal by " + \
12234                         unmerge_action + "\n")
12235                 return 0
12236
12237         # Unmerge order only matters in some cases
12238         if not ordered:
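                      # Group entries by cp so that all versions of a package are
                      # displayed together when order doesn't matter.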
12239                 unordered = {}
12240                 for d in pkgmap:
12241                         selected = d["selected"]
12242                         if not selected:
12243                                 continue
12244                         cp = portage.cpv_getkey(iter(selected).next())
12245                         cp_dict = unordered.get(cp)
12246                         if cp_dict is None:
12247                                 cp_dict = {}
12248                                 unordered[cp] = cp_dict
12249                                 for k in d:
12250                                         cp_dict[k] = set()
12251                         for k, v in d.iteritems():
12252                                 cp_dict[k].update(v)
12253                 pkgmap = [unordered[cp] for cp in sorted(unordered)]
12254
12255         for x in xrange(len(pkgmap)):
12256                 selected = pkgmap[x]["selected"]
12257                 if not selected:
12258                         continue
12259                 for mytype, mylist in pkgmap[x].iteritems():
12260                         if mytype == "selected":
12261                                 continue
12262                         mylist.difference_update(all_selected)
12263                 cp = portage.cpv_getkey(iter(selected).next())
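                      # Any other installed version of this cp that is neither
                      # selected nor protected is listed as omitted.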
12264                 for y in localtree.dep_match(cp):
12265                         if y not in pkgmap[x]["omitted"] and \
12266                                 y not in pkgmap[x]["selected"] and \
12267                                 y not in pkgmap[x]["protected"] and \
12268                                 y not in all_selected:
12269                                 pkgmap[x]["omitted"].add(y)
12270                 if global_unmerge and not pkgmap[x]["selected"]:
12271                         #avoid cluttering the preview printout with stuff that isn't getting unmerged
12272                         continue
12273                 if not (pkgmap[x]["protected"] or pkgmap[x]["omitted"]) and cp in syslist:
12274                         writemsg_level(colorize("BAD","\a\n\n!!! " + \
12275                                 "'%s' is part of your system profile.\n" % cp),
12276                                 level=logging.WARNING, noiselevel=-1)
12277                         writemsg_level(colorize("WARN","\a!!! Unmerging it may " + \
12278                                 "be damaging to your system.\n\n"),
12279                                 level=logging.WARNING, noiselevel=-1)
12280                         if clean_delay and "--pretend" not in myopts and "--ask" not in myopts:
12281                                 countdown(int(settings["EMERGE_WARNING_DELAY"]),
12282                                         colorize("UNMERGE_WARN", "Press Ctrl-C to Stop"))
12283                 if not quiet:
12284                         writemsg_level("\n %s\n" % (bold(cp),), noiselevel=-1)
12285                 else:
12286                         writemsg_level(bold(cp) + ": ", noiselevel=-1)
12287                 for mytype in ["selected","protected","omitted"]:
12288                         if not quiet:
12289                                 writemsg_level((mytype + ": ").rjust(14), noiselevel=-1)
12290                         if pkgmap[x][mytype]:
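                                      # catpkgsplit() returns (cat, pn, ver, rev); the [1:] slice keeps
                                      # (pn, ver, rev) and pkgcmp() orders the entries by version.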
12291                                 sorted_pkgs = [portage.catpkgsplit(mypkg)[1:] for mypkg in pkgmap[x][mytype]]
12292                                 sorted_pkgs.sort(key=cmp_sort_key(portage.pkgcmp))
12293                                 for pn, ver, rev in sorted_pkgs:
12294                                         if rev == "r0":
12295                                                 myversion = ver
12296                                         else:
12297                                                 myversion = ver + "-" + rev
12298                                         if mytype == "selected":
12299                                                 writemsg_level(
12300                                                         colorize("UNMERGE_WARN", myversion + " "),
12301                                                         noiselevel=-1)
12302                                         else:
12303                                                 writemsg_level(
12304                                                         colorize("GOOD", myversion + " "), noiselevel=-1)
12305                         else:
12306                                 writemsg_level("none ", noiselevel=-1)
12307                         if not quiet:
12308                                 writemsg_level("\n", noiselevel=-1)
12309                 if quiet:
12310                         writemsg_level("\n", noiselevel=-1)
12311
12312         writemsg_level("\n>>> " + colorize("UNMERGE_WARN", "'Selected'") + \
12313                 " packages are slated for removal.\n")
12314         writemsg_level(">>> " + colorize("GOOD", "'Protected'") + \
12315                         " and " + colorize("GOOD", "'omitted'") + \
12316                         " packages will not be removed.\n\n")
12317
12318         if "--pretend" in myopts:
12319                 #we're done... return
12320                 return 0
12321         if "--ask" in myopts:
12322                 if userquery("Would you like to unmerge these packages?")=="No":
12323                         # enter pretend mode for correct formatting of results
12324                         myopts["--pretend"] = True
12325                         print
12326                         print "Quitting."
12327                         print
12328                         return 0
12329         #the real unmerging begins, after a short delay....
12330         if clean_delay and not autoclean:
12331                 countdown(int(settings["CLEAN_DELAY"]), ">>> Unmerging")
12332
12333         for x in xrange(len(pkgmap)):
12334                 for y in pkgmap[x]["selected"]:
12335                         writemsg_level(">>> Unmerging "+y+"...\n", noiselevel=-1)
12336                         emergelog(xterm_titles, "=== Unmerging... ("+y+")")
12337                         mysplit = y.split("/")
12338                         #unmerge...
12339                         retval = portage.unmerge(mysplit[0], mysplit[1], settings["ROOT"],
12340                                 mysettings, unmerge_action not in ["clean","prune"],
12341                                 vartree=vartree, ldpath_mtimes=ldpath_mtimes,
12342                                 scheduler=scheduler)
12343
12344                         if retval != os.EX_OK:
12345                                 emergelog(xterm_titles, " !!! unmerge FAILURE: "+y)
12346                                 if raise_on_error:
12347                                         raise UninstallFailure(retval)
12348                                 sys.exit(retval)
12349                         else:
12350                                 if clean_world and hasattr(sets["world"], "cleanPackage"):
12351                                         sets["world"].cleanPackage(vartree.dbapi, y)
12352                                 emergelog(xterm_titles, " >>> unmerge success: "+y)
12353         if clean_world and hasattr(sets["world"], "remove"):
12354                 for s in root_config.setconfig.active:
12355                         sets["world"].remove(SETPREFIX+s)
12356         return 1
12357
12358 def chk_updated_info_files(root, infodirs, prev_mtimes, retval):
12359
12360         if os.path.exists("/usr/bin/install-info"):
12361                 out = portage.output.EOutput()
12362                 regen_infodirs=[]
12363                 for z in infodirs:
12364                         if z=='':
12365                                 continue
12366                         inforoot=normpath(root+z)
12367                         if os.path.isdir(inforoot):
12368                                 infomtime = long(os.stat(inforoot).st_mtime)
12369                                 if inforoot not in prev_mtimes or \
12370                                         prev_mtimes[inforoot] != infomtime:
12371                                                 regen_infodirs.append(inforoot)
12372
12373                 if not regen_infodirs:
12374                         portage.writemsg_stdout("\n")
12375                         out.einfo("GNU info directory index is up-to-date.")
12376                 else:
12377                         portage.writemsg_stdout("\n")
12378                         out.einfo("Regenerating GNU info directory index...")
12379
12380                         dir_extensions = ("", ".gz", ".bz2")
12381                         icount=0
12382                         badcount=0
12383                         errmsg = ""
12384                         for inforoot in regen_infodirs:
12385                                 if inforoot=='':
12386                                         continue
12387
12388                                 if not os.path.isdir(inforoot) or \
12389                                         not os.access(inforoot, os.W_OK):
12390                                         continue
12391
12392                                 file_list = os.listdir(inforoot)
12393                                 file_list.sort()
12394                                 dir_file = os.path.join(inforoot, "dir")
12395                                 moved_old_dir = False
12396                                 processed_count = 0
12397                                 for x in file_list:
12398                                         if x.startswith(".") or \
12399                                                 os.path.isdir(os.path.join(inforoot, x)):
12400                                                 continue
12401                                         if x.startswith("dir"):
12402                                                 skip = False
12403                                                 for ext in dir_extensions:
12404                                                         if x == "dir" + ext or \
12405                                                                 x == "dir" + ext + ".old":
12406                                                                 skip = True
12407                                                                 break
12408                                                 if skip:
12409                                                         continue
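                                              # Before the first info file is processed, move any existing
                                              # dir index files aside (dir -> dir.old) so that install-info
                                              # rebuilds the index from scratch; if no new index is
                                              # generated, the old files are restored further below.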
12410                                         if processed_count == 0:
12411                                                 for ext in dir_extensions:
12412                                                         try:
12413                                                                 os.rename(dir_file + ext, dir_file + ext + ".old")
12414                                                                 moved_old_dir = True
12415                                                         except EnvironmentError, e:
12416                                                                 if e.errno != errno.ENOENT:
12417                                                                         raise
12418                                                                 del e
12419                                         processed_count += 1
12420                                         myso=commands.getstatusoutput("LANG=C LANGUAGE=C /usr/bin/install-info --dir-file="+inforoot+"/dir "+inforoot+"/"+x)[1]
12421                                         existsstr="already exists, for file `"
12422                                         if myso!="":
12423                                                 if re.search(existsstr,myso):
12424                                                         # Already exists... Don't increment the count for this.
12425                                                         pass
12426                                                 elif myso[:44]=="install-info: warning: no info dir entry in ":
12427                                                         # This info file doesn't contain a DIR-header: install-info produces this
12428                                                         # (harmless) warning (the --quiet switch doesn't seem to work).
12429                                                         # Don't increment the count for this.
12430                                                         pass
12431                                                 else:
12432                                                         badcount=badcount+1
12433                                                         errmsg += myso + "\n"
12434                                         icount=icount+1
12435
12436                                 if moved_old_dir and not os.path.exists(dir_file):
12437                                         # We didn't generate a new dir file, so put the old file
12438                                         # back where it was originally found.
12439                                         for ext in dir_extensions:
12440                                                 try:
12441                                                         os.rename(dir_file + ext + ".old", dir_file + ext)
12442                                                 except EnvironmentError, e:
12443                                                         if e.errno != errno.ENOENT:
12444                                                                 raise
12445                                                         del e
12446
12447                                 # Clean up dir.old cruft so that it doesn't prevent
12448                                 # unmerge of otherwise empty directories.
12449                                 for ext in dir_extensions:
12450                                         try:
12451                                                 os.unlink(dir_file + ext + ".old")
12452                                         except EnvironmentError, e:
12453                                                 if e.errno != errno.ENOENT:
12454                                                         raise
12455                                                 del e
12456
12457                                 #update mtime so we can potentially avoid regenerating.
12458                                 prev_mtimes[inforoot] = long(os.stat(inforoot).st_mtime)
12459
12460                         if badcount:
12461                                 out.eerror("Processed %d info files; %d errors." % \
12462                                         (icount, badcount))
12463                                 writemsg_level(errmsg, level=logging.ERROR, noiselevel=-1)
12464                         else:
12465                                 if icount > 0:
12466                                         out.einfo("Processed %d info files." % (icount,))
12467
12468
12469 def display_news_notification(root_config, myopts):
12470         target_root = root_config.root
12471         trees = root_config.trees
12472         settings = trees["vartree"].settings
12473         portdb = trees["porttree"].dbapi
12474         vardb = trees["vartree"].dbapi
12475         NEWS_PATH = os.path.join("metadata", "news")
12476         UNREAD_PATH = os.path.join(target_root, NEWS_LIB_PATH, "news")
12477         newsReaderDisplay = False
12478         update = "--pretend" not in myopts
12479
12480         for repo in portdb.getRepositories():
12481                 unreadItems = checkUpdatedNewsItems(
12482                         portdb, vardb, NEWS_PATH, UNREAD_PATH, repo, update=update)
12483                 if unreadItems:
12484                         if not newsReaderDisplay:
12485                                 newsReaderDisplay = True
12486                                 print
12487                         print colorize("WARN", " * IMPORTANT:"),
12488                         print "%s news items need reading for repository '%s'." % (unreadItems, repo)
12489
12490
12491         if newsReaderDisplay:
12492                 print colorize("WARN", " *"),
12493                 print "Use " + colorize("GOOD", "eselect news") + " to read news items."
12494                 print
12495
12496 def display_preserved_libs(vardbapi):
12497         MAX_DISPLAY = 3
12498
12499         # Ensure the registry is consistent with existing files.
12500         vardbapi.plib_registry.pruneNonExisting()
12501
12502         if vardbapi.plib_registry.hasEntries():
12503                 print
12504                 print colorize("WARN", "!!!") + " existing preserved libs:"
12505                 plibdata = vardbapi.plib_registry.getPreservedLibs()
12506                 linkmap = vardbapi.linkmap
12507                 consumer_map = {}
12508                 owners = {}
12509                 linkmap_broken = False
12510
12511                 try:
12512                         linkmap.rebuild()
12513                 except portage.exception.CommandNotFound, e:
12514                         writemsg_level("!!! Command Not Found: %s\n" % (e,),
12515                                 level=logging.ERROR, noiselevel=-1)
12516                         del e
12517                         linkmap_broken = True
12518                 else:
12519                         search_for_owners = set()
12520                         for cpv in plibdata:
12521                                 internal_plib_keys = set(linkmap._obj_key(f) \
12522                                         for f in plibdata[cpv])
12523                                 for f in plibdata[cpv]:
12524                                         if f in consumer_map:
12525                                                 continue
12526                                         consumers = []
12527                                         for c in linkmap.findConsumers(f):
12528                                                 # Filter out any consumers that are also preserved libs
12529                                                 # belonging to the same package as the provider.
12530                                                 if linkmap._obj_key(c) not in internal_plib_keys:
12531                                                         consumers.append(c)
12532                                         consumers.sort()
12533                                         consumer_map[f] = consumers
12534                                         search_for_owners.update(consumers[:MAX_DISPLAY+1])
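                                              # Look up owners for one more consumer than is normally shown,
                                              # so that a single extra consumer can be named outright below
                                              # instead of being summarized as "1 other files".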
12535
12536                         owners = vardbapi._owners.getFileOwnerMap(search_for_owners)
12537
12538                 for cpv in plibdata:
12539                         print colorize("WARN", ">>>") + " package: %s" % cpv
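                              # Group alternative paths that refer to the same on-disk object
                              # (according to the linkmap object key), so each preserved object
                              # is reported only once with all of its paths listed together.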
12540                         samefile_map = {}
12541                         for f in plibdata[cpv]:
12542                                 obj_key = linkmap._obj_key(f)
12543                                 alt_paths = samefile_map.get(obj_key)
12544                                 if alt_paths is None:
12545                                         alt_paths = set()
12546                                         samefile_map[obj_key] = alt_paths
12547                                 alt_paths.add(f)
12548
12549                         for alt_paths in samefile_map.itervalues():
12550                                 alt_paths = sorted(alt_paths)
12551                                 for p in alt_paths:
12552                                         print colorize("WARN", " * ") + " - %s" % (p,)
12553                                 f = alt_paths[0]
12554                                 consumers = consumer_map.get(f, [])
12555                                 for c in consumers[:MAX_DISPLAY]:
12556                                         print colorize("WARN", " * ") + "     used by %s (%s)" % \
12557                                                 (c, ", ".join(x.mycpv for x in owners.get(c, [])))
12558                                 if len(consumers) == MAX_DISPLAY + 1:
12559                                         print colorize("WARN", " * ") + "     used by %s (%s)" % \
12560                                                 (consumers[MAX_DISPLAY], ", ".join(x.mycpv \
12561                                                 for x in owners.get(consumers[MAX_DISPLAY], [])))
12562                                 elif len(consumers) > MAX_DISPLAY:
12563                                         print colorize("WARN", " * ") + "     used by %d other files" % (len(consumers) - MAX_DISPLAY)
12564                 print "Use " + colorize("GOOD", "emerge @preserved-rebuild") + " to rebuild packages using these libraries"
12565
12566
12567 def _flush_elog_mod_echo():
12568         """
12569         Dump the mod_echo output now so that our other
12570         notifications are shown last.
12571         @rtype: bool
12572         @returns: True if messages were shown, False otherwise.
12573         """
12574         messages_shown = False
12575         try:
12576                 from portage.elog import mod_echo
12577         except ImportError:
12578                 pass # happens during downgrade to a version without the module
12579         else:
12580                 messages_shown = bool(mod_echo._items)
12581                 mod_echo.finalize()
12582         return messages_shown
12583
12584 def post_emerge(root_config, myopts, mtimedb, retval):
12585         """
12586         Misc. things to run at the end of a merge session.
12587         
12588         Update Info Files
12589         Update Config Files
12590         Update News Items
12591         Commit mtimeDB
12592         Display preserved libs warnings
12593         Exit Emerge
12594
12595         @param root_config: The RootConfig of the target ROOT, providing its package databases
12596         @type root_config: RootConfig
12597         @param mtimedb: The mtimeDB to store data needed across merge invocations
12598         @type mtimedb: MtimeDB class instance
12599         @param retval: Emerge's return value
12600         @type retval: int
12601         @rtype: None
12602         @returns:
12603         1.  Calls sys.exit(retval)
12604         """
12605
12606         target_root = root_config.root
12607         trees = { target_root : root_config.trees }
12608         vardbapi = trees[target_root]["vartree"].dbapi
12609         settings = vardbapi.settings
12610         info_mtimes = mtimedb["info"]
12611
12612         # Load the most current variables from ${ROOT}/etc/profile.env
12613         settings.unlock()
12614         settings.reload()
12615         settings.regenerate()
12616         settings.lock()
12617
12618         config_protect = settings.get("CONFIG_PROTECT","").split()
12619         infodirs = settings.get("INFOPATH","").split(":") + \
12620                 settings.get("INFODIR","").split(":")
12621
12622         os.chdir("/")
12623
12624         if retval == os.EX_OK:
12625                 exit_msg = " *** exiting successfully."
12626         else:
12627                 exit_msg = " *** exiting unsuccessfully with status '%s'." % retval
12628         emergelog("notitles" not in settings.features, exit_msg)
12629
12630         _flush_elog_mod_echo()
12631
12632         counter_hash = settings.get("PORTAGE_COUNTER_HASH")
12633         if "--pretend" in myopts or (counter_hash is not None and \
12634                 counter_hash == vardbapi._counter_hash()):
12635                 display_news_notification(root_config, myopts)
12636                 # If vdb state has not changed then there's nothing else to do.
12637                 sys.exit(retval)
12638
12639         vdb_path = os.path.join(target_root, portage.VDB_PATH)
12640         portage.util.ensure_dirs(vdb_path)
12641         vdb_lock = None
12642         if os.access(vdb_path, os.W_OK) and "--pretend" not in myopts:
12643                 vdb_lock = portage.locks.lockdir(vdb_path)
12644
12645         if vdb_lock:
12646                 try:
12647                         if "noinfo" not in settings.features:
12648                                 chk_updated_info_files(target_root,
12649                                         infodirs, info_mtimes, retval)
12650                         mtimedb.commit()
12651                 finally:
12652                         if vdb_lock:
12653                                 portage.locks.unlockdir(vdb_lock)
12654
12655         chk_updated_cfg_files(target_root, config_protect)
12656         
12657         display_news_notification(root_config, myopts)
12658         if retval in (None, os.EX_OK) or "--pretend" not in myopts:
12659                 display_preserved_libs(vardbapi)
12660
12661         sys.exit(retval)
12662
12663
12664 def chk_updated_cfg_files(target_root, config_protect):
12665         if config_protect:
12666                 #number of directories with some protect files in them
12667                 procount=0
12668                 for x in config_protect:
12669                         x = os.path.join(target_root, x.lstrip(os.path.sep))
12670                         if not os.access(x, os.W_OK):
12671                                 # Avoid Permission denied errors generated
12672                                 # later by `find`.
12673                                 continue
12674                         try:
12675                                 mymode = os.lstat(x).st_mode
12676                         except OSError:
12677                                 continue
12678                         if stat.S_ISLNK(mymode):
12679                                 # We want to treat it like a directory if it
12680                                 # is a symlink to an existing directory.
12681                                 try:
12682                                         real_mode = os.stat(x).st_mode
12683                                         if stat.S_ISDIR(real_mode):
12684                                                 mymode = real_mode
12685                                 except OSError:
12686                                         pass
12687                         if stat.S_ISDIR(mymode):
12688                                 mycommand = "find '%s' -name '.*' -type d -prune -o -name '._cfg????_*'" % x
12689                         else:
12690                                 mycommand = "find '%s' -maxdepth 1 -name '._cfg????_%s'" % \
12691                                         os.path.split(x.rstrip(os.path.sep))
12692                         mycommand += " ! -name '.*~' ! -iname '.*.bak' -print0"
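                              # e.g. (illustrative, with target_root "/" and a protected directory
                              # such as /etc) the resulting command is:
                              #   find '/etc' -name '.*' -type d -prune -o -name '._cfg????_*' \
                              #     ! -name '.*~' ! -iname '.*.bak' -print0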
12693                         a = commands.getstatusoutput(mycommand)
12694                         if a[0] != 0:
12695                                 sys.stderr.write(" %s error scanning '%s': " % (bad("*"), x))
12696                                 sys.stderr.flush()
12697                                 # Show the error message alone, sending stdout to /dev/null.
12698                                 os.system(mycommand + " 1>/dev/null")
12699                         else:
12700                                 files = a[1].split('\0')
12701                                 # split always produces an empty string as the last element
12702                                 if files and not files[-1]:
12703                                         del files[-1]
12704                                 if files:
12705                                         procount += 1
12706                                         print "\n"+colorize("WARN", " * IMPORTANT:"),
12707                                         if stat.S_ISDIR(mymode):
12708                                                  print "%d config files in '%s' need updating." % \
12709                                                         (len(files), x)
12710                                         else:
12711                                                  print "config file '%s' needs updating." % x
12712
12713                 if procount:
12714                         print " "+yellow("*")+" See the "+colorize("INFORM","CONFIGURATION FILES")+ \
12715                                 " section of the " + bold("emerge")
12716                         print " "+yellow("*")+" man page to learn how to update config files."
12717
12718 def checkUpdatedNewsItems(portdb, vardb, NEWS_PATH, UNREAD_PATH, repo_id,
12719         update=False):
12720         """
12721         Examines news items in repodir + '/' + NEWS_PATH and attempts to find unread items
12722         Returns the number of unread (yet relevant) items.
12723         
12724         @param portdb: a portage tree database
12725         @type portdb: portdbapi
12726         @param vardb: an installed package database
12727         @type vardb: vardbapi
12728         @param NEWS_PATH:
12729         @type NEWS_PATH:
12730         @param UNREAD_PATH:
12731         @type UNREAD_PATH:
12732         @param repo_id:
12733         @type repo_id:
12734         @rtype: Integer
12735         @returns:
12736         1.  The number of unread but relevant news items.
12737         
12738         """
12739         from portage.news import NewsManager
12740         manager = NewsManager(portdb, vardb, NEWS_PATH, UNREAD_PATH)
12741         return manager.getUnreadItems( repo_id, update=update )
12742
12743 def insert_category_into_atom(atom, category):
12744         alphanum = re.search(r'\w', atom)
12745         if alphanum:
12746                 ret = atom[:alphanum.start()] + "%s/" % category + \
12747                         atom[alphanum.start():]
12748         else:
12749                 ret = None
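              # Doctest-style sketch of the behaviour above (category value assumed):
              #
              #     >>> insert_category_into_atom(">=portage-2.1", "sys-apps")
              #     '>=sys-apps/portage-2.1'
              #     >>> print insert_category_into_atom("***", "sys-apps")
              #     None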
12750         return ret
12751
12752 def is_valid_package_atom(x):
12753         if "/" not in x:
12754                 alphanum = re.search(r'\w', x)
12755                 if alphanum:
12756                         x = x[:alphanum.start()] + "cat/" + x[alphanum.start():]
12757         return portage.isvalidatom(x)
12758
12759 def show_blocker_docs_link():
12760         print
12761         print "For more information about " + bad("Blocked Packages") + ", please refer to the following"
12762         print "section of the Gentoo Linux x86 Handbook (architecture is irrelevant):"
12763         print
12764         print "http://www.gentoo.org/doc/en/handbook/handbook-x86.xml?full=1#blocked"
12765         print
12766
12767 def show_mask_docs():
12768         print "For more information, see the MASKED PACKAGES section in the emerge"
12769         print "man page or refer to the Gentoo Handbook."
12770
12771 def action_sync(settings, trees, mtimedb, myopts, myaction):
12772         xterm_titles = "notitles" not in settings.features
12773         emergelog(xterm_titles, " === sync")
12774         portdb = trees[settings["ROOT"]]["porttree"].dbapi
12775         myportdir = portdb.porttree_root
12776         out = portage.output.EOutput()
12777         if not myportdir:
12778                 sys.stderr.write("!!! PORTDIR is undefined.  Is /etc/make.globals missing?\n")
12779                 sys.exit(1)
12780         if myportdir[-1]=="/":
12781                 myportdir=myportdir[:-1]
12782         try:
12783                 st = os.stat(myportdir)
12784         except OSError:
12785                 st = None
12786         if st is None:
12787                 print ">>>",myportdir,"not found, creating it."
12788                 os.makedirs(myportdir,0755)
12789                 st = os.stat(myportdir)
12790
12791         spawn_kwargs = {}
12792         spawn_kwargs["env"] = settings.environ()
12793         if 'usersync' in settings.features and \
12794                 portage.data.secpass >= 2 and \
12795                 (st.st_uid != os.getuid() and st.st_mode & 0700 or \
12796                 st.st_gid != os.getgid() and st.st_mode & 0070):
12797                 try:
12798                         homedir = pwd.getpwuid(st.st_uid).pw_dir
12799                 except KeyError:
12800                         pass
12801                 else:
12802                         # Drop privileges when syncing, in order to match
12803                         # existing uid/gid settings.
12804                         spawn_kwargs["uid"]    = st.st_uid
12805                         spawn_kwargs["gid"]    = st.st_gid
12806                         spawn_kwargs["groups"] = [st.st_gid]
12807                         spawn_kwargs["env"]["HOME"] = homedir
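                              # Start from a umask of 002; if the tree itself is not group
                              # writable, additionally mask the group write bit so the sync
                              # does not create group-writable files.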
12808                         umask = 0002
12809                         if not st.st_mode & 0020:
12810                                 umask = umask | 0020
12811                         spawn_kwargs["umask"] = umask
12812
12813         syncuri = settings.get("SYNC", "").strip()
12814         if not syncuri:
12815                 writemsg_level("!!! SYNC is undefined. Is /etc/make.globals missing?\n",
12816                         noiselevel=-1, level=logging.ERROR)
12817                 return 1
12818
12819         vcs_dirs = frozenset([".git", ".svn", "CVS", ".hg"])
12820         vcs_dirs = vcs_dirs.intersection(os.listdir(myportdir))
12821
12822         os.umask(0022)
12823         dosyncuri = syncuri
12824         updatecache_flg = False
12825         if myaction == "metadata":
12826                 print "skipping sync"
12827                 updatecache_flg = True
12828         elif ".git" in vcs_dirs:
12829                 # Update existing git repository, and ignore the syncuri. We are
12830                 # going to trust the user and assume that the user is in the branch
12831                 # that he/she wants updated. We'll let the user manage branches with
12832                 # git directly.
12833                 if portage.process.find_binary("git") is None:
12834                         msg = ["Command not found: git",
12835                         "Type \"emerge dev-util/git\" to enable git support."]
12836                         for l in msg:
12837                                 writemsg_level("!!! %s\n" % l,
12838                                         level=logging.ERROR, noiselevel=-1)
12839                         return 1
12840                 msg = ">>> Starting git pull in %s..." % myportdir
12841                 emergelog(xterm_titles, msg )
12842                 writemsg_level(msg + "\n")
12843                 exitcode = portage.process.spawn_bash("cd %s ; git pull" % \
12844                         (portage._shell_quote(myportdir),), **spawn_kwargs)
12845                 if exitcode != os.EX_OK:
12846                         msg = "!!! git pull error in %s." % myportdir
12847                         emergelog(xterm_titles, msg)
12848                         writemsg_level(msg + "\n", level=logging.ERROR, noiselevel=-1)
12849                         return exitcode
12850                 msg = ">>> Git pull in %s successful" % myportdir
12851                 emergelog(xterm_titles, msg)
12852                 writemsg_level(msg + "\n")
12853                 exitcode = git_sync_timestamps(settings, myportdir)
12854                 if exitcode == os.EX_OK:
12855                         updatecache_flg = True
12856         elif syncuri[:8]=="rsync://":
12857                 for vcs_dir in vcs_dirs:
12858                         writemsg_level(("!!! %s appears to be under revision " + \
12859                                 "control (contains %s).\n!!! Aborting rsync sync.\n") % \
12860                                 (myportdir, vcs_dir), level=logging.ERROR, noiselevel=-1)
12861                         return 1
12862                 if not os.path.exists("/usr/bin/rsync"):
12863                         print "!!! /usr/bin/rsync does not exist, so rsync support is disabled."
12864                         print "!!! Type \"emerge net-misc/rsync\" to enable rsync support."
12865                         sys.exit(1)
12866                 mytimeout=180
12867
12868                 rsync_opts = []
12869                 if settings["PORTAGE_RSYNC_OPTS"] == "":
12870                         portage.writemsg("PORTAGE_RSYNC_OPTS empty or unset, using hardcoded defaults\n")
12871                         rsync_opts.extend([
12872                                 "--recursive",    # Recurse directories
12873                                 "--links",        # Consider symlinks
12874                                 "--safe-links",   # Ignore links outside of tree
12875                                 "--perms",        # Preserve permissions
12876                                 "--times",        # Preserve mod times
12877                                 "--compress",     # Compress the data transmitted
12878                                 "--force",        # Force deletion on non-empty dirs
12879                                 "--whole-file",   # Don't do block transfers, only entire files
12880                                 "--delete",       # Delete files that aren't in the master tree
12881                                 "--stats",        # Show final statistics about what was transferred
12882                                 "--timeout="+str(mytimeout), # IO timeout if not done in X seconds
12883                                 "--exclude=/distfiles",   # Exclude distfiles from consideration
12884                                 "--exclude=/local",       # Exclude local     from consideration
12885                                 "--exclude=/packages",    # Exclude packages  from consideration
12886                         ])
12887
12888                 else:
12889                         # The below validation is not needed when using the above hardcoded
12890                         # defaults.
12891
12892                         portage.writemsg("Using PORTAGE_RSYNC_OPTS instead of hardcoded defaults\n", 1)
12893                         rsync_opts.extend(
12894                                 shlex.split(settings.get("PORTAGE_RSYNC_OPTS","")))
12895                         for opt in ("--recursive", "--times"):
12896                                 if opt not in rsync_opts:
12897                                         portage.writemsg(yellow("WARNING:") + " adding required option " + \
12898                                         "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
12899                                         rsync_opts.append(opt)
12900         
12901                         for exclude in ("distfiles", "local", "packages"):
12902                                 opt = "--exclude=/%s" % exclude
12903                                 if opt not in rsync_opts:
12904                                         portage.writemsg(yellow("WARNING:") + \
12905                                         " adding required option %s not included in "  % opt + \
12906                                         "PORTAGE_RSYNC_OPTS (can be overridden with --exclude='!')\n")
12907                                         rsync_opts.append(opt)
12908         
12909                         if syncuri.rstrip("/").endswith(".gentoo.org/gentoo-portage"):
12910                                 def rsync_opt_startswith(opt_prefix):
12911                                         for x in rsync_opts:
12912                                                 if x.startswith(opt_prefix):
12913                                                         return True
12914                                         return False
12915
12916                                 if not rsync_opt_startswith("--timeout="):
12917                                         rsync_opts.append("--timeout=%d" % mytimeout)
12918
12919                                 for opt in ("--compress", "--whole-file"):
12920                                         if opt not in rsync_opts:
12921                                                 portage.writemsg(yellow("WARNING:") + " adding required option " + \
12922                                                 "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
12923                                                 rsync_opts.append(opt)
12924
12925                 if "--quiet" in myopts:
12926                         rsync_opts.append("--quiet")    # Shut up a lot
12927                 else:
12928                         rsync_opts.append("--verbose")  # Print filelist
12929
12930                 if "--verbose" in myopts:
12931                         rsync_opts.append("--progress")  # Progress meter for each file
12932
12933                 if "--debug" in myopts:
12934                         rsync_opts.append("--checksum") # Force checksum on all files
12935
12936                 # Real local timestamp file.
12937                 servertimestampfile = os.path.join(
12938                         myportdir, "metadata", "timestamp.chk")
12939
12940                 content = portage.util.grabfile(servertimestampfile)
12941                 mytimestamp = 0
12942                 if content:
12943                         try:
12944                                 mytimestamp = time.mktime(time.strptime(content[0],
12945                                         "%a, %d %b %Y %H:%M:%S +0000"))
12946                         except (OverflowError, ValueError):
12947                                 pass
12948                 del content
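                      # The timestamp file, when present, is expected to contain one line
                      # in the UTC format parsed above, e.g. (illustrative):
                      #   Wed, 01 Apr 2009 12:00:00 +0000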
12949
12950                 try:
12951                         rsync_initial_timeout = \
12952                                 int(settings.get("PORTAGE_RSYNC_INITIAL_TIMEOUT", "15"))
12953                 except ValueError:
12954                         rsync_initial_timeout = 15
12955
12956                 try:
12957                         maxretries=int(settings["PORTAGE_RSYNC_RETRIES"])
12958                 except SystemExit, e:
12959                         raise # Needed else can't exit
12960                 except:
12961                         maxretries=3 #default number of retries
12962
12963                 retries=0
12964                 user_name, hostname, port = re.split(
12965                         "rsync://([^:/]+@)?([^:/]*)(:[0-9]+)?", syncuri, maxsplit=3)[1:4]
12966                 if port is None:
12967                         port=""
12968                 if user_name is None:
12969                         user_name=""
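                      # e.g. (illustrative): for syncuri "rsync://rsync.gentoo.org/gentoo-portage"
                      # the split above yields user_name="", hostname="rsync.gentoo.org"
                      # and port="".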
12970                 updatecache_flg=True
12971                 all_rsync_opts = set(rsync_opts)
12972                 extra_rsync_opts = shlex.split(
12973                         settings.get("PORTAGE_RSYNC_EXTRA_OPTS",""))
12974                 all_rsync_opts.update(extra_rsync_opts)
12975                 family = socket.AF_INET
12976                 if "-4" in all_rsync_opts or "--ipv4" in all_rsync_opts:
12977                         family = socket.AF_INET
12978                 elif socket.has_ipv6 and \
12979                         ("-6" in all_rsync_opts or "--ipv6" in all_rsync_opts):
12980                         family = socket.AF_INET6
12981                 ips=[]
12982                 SERVER_OUT_OF_DATE = -1
12983                 EXCEEDED_MAX_RETRIES = -2
12984                 while (1):
12985                         if ips:
12986                                 del ips[0]
12987                         if ips==[]:
12988                                 try:
12989                                         for addrinfo in socket.getaddrinfo(
12990                                                 hostname, None, family, socket.SOCK_STREAM):
12991                                                 if socket.has_ipv6 and addrinfo[0] == socket.AF_INET6:
12992                                                         # IPv6 addresses need to be enclosed in square brackets
12993                                                         ips.append("[%s]" % addrinfo[4][0])
12994                                                 else:
12995                                                         ips.append(addrinfo[4][0])
12996                                         from random import shuffle
12997                                         shuffle(ips)
12998                                 except SystemExit, e:
12999                                         raise # Needed else can't exit
13000                                 except Exception, e:
13001                                         print "Notice:",str(e)
13002                                         dosyncuri=syncuri
13003
13004                         if ips:
13005                                 try:
13006                                         dosyncuri = syncuri.replace(
13007                                                 "//" + user_name + hostname + port + "/",
13008                                                 "//" + user_name + ips[0] + port + "/", 1)
13009                                 except SystemExit, e:
13010                                         raise # Needed else can't exit
13011                                 except Exception, e:
13012                                         print "Notice:",str(e)
13013                                         dosyncuri=syncuri
13014
13015                         if (retries==0):
13016                                 if "--ask" in myopts:
13017                                         if userquery("Do you want to sync your Portage tree with the mirror at\n" + blue(dosyncuri) + bold("?"))=="No":
13018                                                 print
13019                                                 print "Quitting."
13020                                                 print
13021                                                 sys.exit(0)
13022                                 emergelog(xterm_titles, ">>> Starting rsync with " + dosyncuri)
13023                                 if "--quiet" not in myopts:
13024                                         print ">>> Starting rsync with "+dosyncuri+"..."
13025                         else:
13026                                 emergelog(xterm_titles,
13027                                         ">>> Starting retry %d of %d with %s" % \
13028                                                 (retries,maxretries,dosyncuri))
13029                                 print "\n\n>>> Starting retry %d of %d with %s" % (retries,maxretries,dosyncuri)
13030
13031                         if mytimestamp != 0 and "--quiet" not in myopts:
13032                                 print ">>> Checking server timestamp ..."
13033
13034                         rsynccommand = ["/usr/bin/rsync"] + rsync_opts + extra_rsync_opts
13035
13036                         if "--debug" in myopts:
13037                                 print rsynccommand
13038
13039                         exitcode = os.EX_OK
13040                         servertimestamp = 0
13041                         # Even if there's no timestamp available locally, fetch the
13042                         # timestamp anyway as an initial probe to verify that the server is
13043                         # responsive.  This protects us from hanging indefinitely on a
13044                         # connection attempt to an unresponsive server which rsync's
13045                         # --timeout option does not prevent.
13046                         if True:
13047                                 # Temporary file for remote server timestamp comparison.
13048                                 from tempfile import mkstemp
13049                                 fd, tmpservertimestampfile = mkstemp()
13050                                 os.close(fd)
13051                                 mycommand = rsynccommand[:]
13052                                 mycommand.append(dosyncuri.rstrip("/") + \
13053                                         "/metadata/timestamp.chk")
13054                                 mycommand.append(tmpservertimestampfile)
13055                                 content = None
13056                                 mypids = []
13057                                 try:
13058                                         def timeout_handler(signum, frame):
13059                                                 raise portage.exception.PortageException("timed out")
13060                                         signal.signal(signal.SIGALRM, timeout_handler)
13061                                         # Timeout here in case the server is unresponsive.  The
13062                                         # --timeout rsync option doesn't apply to the initial
13063                                         # connection attempt.
13064                                         if rsync_initial_timeout:
13065                                                 signal.alarm(rsync_initial_timeout)
13066                                         try:
13067                                                 mypids.extend(portage.process.spawn(
13068                                                         mycommand, env=settings.environ(), returnpid=True))
13069                                                 exitcode = os.waitpid(mypids[0], 0)[1]
13070                                                 content = portage.grabfile(tmpservertimestampfile)
13071                                         finally:
13072                                                 if rsync_initial_timeout:
13073                                                         signal.alarm(0)
13074                                                 try:
13075                                                         os.unlink(tmpservertimestampfile)
13076                                                 except OSError:
13077                                                         pass
13078                                 except portage.exception.PortageException, e:
13079                                         # timed out
13080                                         print e
13081                                         del e
13082                                         if mypids and os.waitpid(mypids[0], os.WNOHANG) == (0,0):
13083                                                 os.kill(mypids[0], signal.SIGTERM)
13084                                                 os.waitpid(mypids[0], 0)
13085                                         # This is the same code rsync uses for timeout.
13086                                         exitcode = 30
13087                                 else:
13088                                         if exitcode != os.EX_OK:
13089                                                 if exitcode & 0xff:
13090                                                         exitcode = (exitcode & 0xff) << 8
13091                                                 else:
13092                                                         exitcode = exitcode >> 8
13093                                 if mypids:
13094                                         portage.process.spawned_pids.remove(mypids[0])
13095                                 if content:
13096                                         try:
13097                                                 servertimestamp = time.mktime(time.strptime(
13098                                                         content[0], "%a, %d %b %Y %H:%M:%S +0000"))
13099                                         except (OverflowError, ValueError):
13100                                                 pass
13101                                 del mycommand, mypids, content
13102                         if exitcode == os.EX_OK:
13103                                 if (servertimestamp != 0) and (servertimestamp == mytimestamp):
13104                                         emergelog(xterm_titles,
13105                                                 ">>> Cancelling sync -- Already current.")
13106                                         print
13107                                         print ">>>"
13108                                         print ">>> Timestamps on the server and in the local repository are the same."
13109                                         print ">>> Cancelling all further sync action. You are already up to date."
13110                                         print ">>>"
13111                                         print ">>> In order to force sync, remove '%s'." % servertimestampfile
13112                                         print ">>>"
13113                                         print
13114                                         sys.exit(0)
13115                                 elif (servertimestamp != 0) and (servertimestamp < mytimestamp):
13116                                         emergelog(xterm_titles,
13117                                                 ">>> Server out of date: %s" % dosyncuri)
13118                                         print
13119                                         print ">>>"
13120                                         print ">>> SERVER OUT OF DATE: %s" % dosyncuri
13121                                         print ">>>"
13122                                         print ">>> In order to force sync, remove '%s'." % servertimestampfile
13123                                         print ">>>"
13124                                         print
13125                                         exitcode = SERVER_OUT_OF_DATE
13126                                 elif (servertimestamp == 0) or (servertimestamp > mytimestamp):
13127                                         # actual sync
13128                                         mycommand = rsynccommand + [dosyncuri+"/", myportdir]
13129                                         exitcode = portage.process.spawn(mycommand, **spawn_kwargs)
13130                                         if exitcode in [0,1,3,4,11,14,20,21]:
13131                                                 break
13132                         elif exitcode in [1,3,4,11,14,20,21]:
13133                                 break
13134                         else:
13135                                 # Code 2 indicates protocol incompatibility, which is expected
13136                                 # for servers with protocol < 29 that don't support
13137                                 # --prune-empty-directories.  Retry for a server that supports
13138                                 # at least rsync protocol version 29 (>=rsync-2.6.4).
13139                                 pass
13140
13141                         retries=retries+1
13142
13143                         if retries<=maxretries:
13144                                 print ">>> Retrying..."
13145                                 time.sleep(11)
13146                         else:
13147                                 # over retries
13148                                 # exit loop
13149                                 updatecache_flg=False
13150                                 exitcode = EXCEEDED_MAX_RETRIES
13151                                 break
13152
13153                 if (exitcode==0):
13154                         emergelog(xterm_titles, "=== Sync completed with %s" % dosyncuri)
13155                 elif exitcode == SERVER_OUT_OF_DATE:
13156                         sys.exit(1)
13157                 elif exitcode == EXCEEDED_MAX_RETRIES:
13158                         sys.stderr.write(
13159                                 ">>> Exceeded PORTAGE_RSYNC_RETRIES: %s\n" % maxretries)
13160                         sys.exit(1)
13161                 elif (exitcode>0):
13162                         msg = []
13163                         if exitcode==1:
13164                                 msg.append("Rsync has reported that there is a syntax error. Please ensure")
13165                                 msg.append("that your SYNC statement is proper.")
13166                                 msg.append("SYNC=" + settings["SYNC"])
13167                         elif exitcode==11:
13168                                 msg.append("Rsync has reported that there is a File IO error. Normally")
13169                                 msg.append("this means your disk is full, but can be caused by corruption")
13170                                 msg.append("on the filesystem that contains PORTDIR. Please investigate")
13171                                 msg.append("and try again after the problem has been fixed.")
13172                                 msg.append("PORTDIR=" + settings["PORTDIR"])
13173                         elif exitcode==20:
13174                                 msg.append("Rsync was killed before it finished.")
13175                         else:
13176                                 msg.append("Rsync has not successfully finished. It is recommended that you keep")
13177                                 msg.append("trying or that you use the 'emerge-webrsync' option if you are unable")
13178                                 msg.append("to use rsync due to firewall or other restrictions. This should be a")
13179                                 msg.append("temporary problem unless complications exist with your network")
13180                                 msg.append("(and possibly your system's filesystem) configuration.")
13181                         for line in msg:
13182                                 out.eerror(line)
13183                         sys.exit(exitcode)
13184         elif syncuri[:6]=="cvs://":
13185                 if not os.path.exists("/usr/bin/cvs"):
13186                         print "!!! /usr/bin/cvs does not exist, so CVS support is disabled."
13187                         print "!!! Type \"emerge dev-util/cvs\" to enable CVS support."
13188                         sys.exit(1)
13189                 cvsroot=syncuri[6:]
13190                 cvsdir=os.path.dirname(myportdir)
13191                 if not os.path.exists(myportdir+"/CVS"):
13192                         #initial checkout
13193                         print ">>> Starting initial cvs checkout with "+syncuri+"..."
13194                         if os.path.exists(cvsdir+"/gentoo-x86"):
13195                                 print "!!! existing",cvsdir+"/gentoo-x86 directory; exiting."
13196                                 sys.exit(1)
13197                         try:
13198                                 os.rmdir(myportdir)
13199                         except OSError, e:
13200                                 if e.errno != errno.ENOENT:
13201                                         sys.stderr.write(
13202                                                 "!!! existing '%s' directory; exiting.\n" % myportdir)
13203                                         sys.exit(1)
13204                                 del e
13205                         if portage.spawn("cd "+cvsdir+"; cvs -z0 -d "+cvsroot+" co -P gentoo-x86",settings,free=1):
13206                                 print "!!! cvs checkout error; exiting."
13207                                 sys.exit(1)
13208                         os.rename(os.path.join(cvsdir, "gentoo-x86"), myportdir)
13209                 else:
13210                         #cvs update
13211                         print ">>> Starting cvs update with "+syncuri+"..."
13212                         retval = portage.process.spawn_bash(
13213                                 "cd %s; cvs -z0 -q update -dP" % \
13214                                 (portage._shell_quote(myportdir),), **spawn_kwargs)
13215                         if retval != os.EX_OK:
13216                                 sys.exit(retval)
13217                 dosyncuri = syncuri
13218         else:
13219                 writemsg_level("!!! Unrecognized protocol: SYNC='%s'\n" % (syncuri,),
13220                         noiselevel=-1, level=logging.ERROR)
13221                 return 1
13222
13223         if updatecache_flg and  \
13224                 myaction != "metadata" and \
13225                 "metadata-transfer" not in settings.features:
13226                 updatecache_flg = False
13227
13228         # Reload the whole config from scratch.
13229         settings, trees, mtimedb = load_emerge_config(trees=trees)
13230         root_config = trees[settings["ROOT"]]["root_config"]
13231         portdb = trees[settings["ROOT"]]["porttree"].dbapi
13232
13233         if updatecache_flg and \
13234                 os.path.exists(os.path.join(myportdir, 'metadata', 'cache')):
13235
13236                 # Only update cache for myportdir since that's
13237                 # the only one that's been synced here.
13238                 action_metadata(settings, portdb, myopts, porttrees=[myportdir])
13239
13240         if portage._global_updates(trees, mtimedb["updates"]):
13241                 mtimedb.commit()
13242                 # Reload the whole config from scratch.
13243                 settings, trees, mtimedb = load_emerge_config(trees=trees)
13244                 portdb = trees[settings["ROOT"]]["porttree"].dbapi
13245                 root_config = trees[settings["ROOT"]]["root_config"]
13246
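              # Compare the best visible portage version in the tree with the
              # best installed version so that an update to portage can be
              # suggested below when one is available.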
13247         mybestpv = portdb.xmatch("bestmatch-visible",
13248                 portage.const.PORTAGE_PACKAGE_ATOM)
13249         mypvs = portage.best(
13250                 trees[settings["ROOT"]]["vartree"].dbapi.match(
13251                 portage.const.PORTAGE_PACKAGE_ATOM))
13252
13253         chk_updated_cfg_files("/", settings.get("CONFIG_PROTECT","").split())
13254
13255         if myaction != "metadata":
13256                 if os.access(portage.USER_CONFIG_PATH + "/bin/post_sync", os.X_OK):
13257                         retval = portage.process.spawn(
13258                                 [os.path.join(portage.USER_CONFIG_PATH, "bin", "post_sync"),
13259                                 dosyncuri], env=settings.environ())
13260                         if retval != os.EX_OK:
13261                                 print red(" * ")+bold("Failed to spawn " + portage.USER_CONFIG_PATH + "/bin/post_sync")
13262
13263         if (mybestpv != mypvs) and "--quiet" not in myopts:
13264                 print
13265                 print red(" * ")+bold("An update to portage is available.")+" It is _highly_ recommended"
13266                 print red(" * ")+"that you update portage now, before any other packages are updated."
13267                 print
13268                 print red(" * ")+"To update portage, run 'emerge portage' now."
13269                 print
13270         
13271         display_news_notification(root_config, myopts)
13272         return os.EX_OK
13273
13274 def git_sync_timestamps(settings, portdir):
13275         """
13276         Since git doesn't preserve timestamps, synchronize timestamps between
13277         entries and ebuilds/eclasses. Assume the cache has the correct timestamp
13278         cache entries and their ebuilds/eclasses. Assume the cache has the correct timestamp
13279         (relative to HEAD).
13280         """
13281         cache_dir = os.path.join(portdir, "metadata", "cache")
13282         if not os.path.isdir(cache_dir):
13283                 return os.EX_OK
13284         writemsg_level(">>> Synchronizing timestamps...\n")
13285
13286         from portage.cache.cache_errors import CacheError
13287         try:
13288                 cache_db = settings.load_best_module("portdbapi.metadbmodule")(
13289                         portdir, "metadata/cache", portage.auxdbkeys[:], readonly=True)
13290         except CacheError, e:
13291                 writemsg_level("!!! Unable to instantiate cache: %s\n" % (e,),
13292                         level=logging.ERROR, noiselevel=-1)
13293                 return 1
13294
13295         ec_dir = os.path.join(portdir, "eclass")
13296         try:
13297                 ec_names = set(f[:-7] for f in os.listdir(ec_dir) \
13298                         if f.endswith(".eclass"))
13299         except OSError, e:
13300                 writemsg_level("!!! Unable to list eclasses: %s\n" % (e,),
13301                         level=logging.ERROR, noiselevel=-1)
13302                 return 1
13303
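              # Ask git for the list of tracked files that are modified relative
              # to HEAD; cached timestamps for those files are not trusted below.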
13304         args = [portage.const.BASH_BINARY, "-c",
13305                 "cd %s && git diff-index --name-only --diff-filter=M HEAD" % \
13306                 portage._shell_quote(portdir)]
13307         import subprocess
13308         proc = subprocess.Popen(args, stdout=subprocess.PIPE)
13309         modified_files = set(l.rstrip("\n") for l in proc.stdout)
13310         rval = proc.wait()
13311         if rval != os.EX_OK:
13312                 return rval
13313
13314         modified_eclasses = set(ec for ec in ec_names \
13315                 if os.path.join("eclass", ec + ".eclass") in modified_files)
13316
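              # Track the eclass mtimes that have already been applied so that
              # cache entries which disagree about an eclass mtime can be
              # detected as inconsistent.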
13317         updated_ec_mtimes = {}
13318
13319         for cpv in cache_db:
13320                 cpv_split = portage.catpkgsplit(cpv)
13321                 if cpv_split is None:
13322                         writemsg_level("!!! Invalid cache entry: %s\n" % (cpv,),
13323                                 level=logging.ERROR, noiselevel=-1)
13324                         continue
13325
13326                 cat, pn, ver, rev = cpv_split
13327                 cat, pf = portage.catsplit(cpv)
13328                 relative_eb_path = os.path.join(cat, pn, pf + ".ebuild")
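                      # The cache timestamp is only trusted for files that are
                      # unmodified in the working tree, so skip modified ebuilds.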
13329                 if relative_eb_path in modified_files:
13330                         continue
13331
13332                 try:
13333                         cache_entry = cache_db[cpv]
13334                         eb_mtime = cache_entry.get("_mtime_")
13335                         ec_mtimes = cache_entry.get("_eclasses_")
13336                 except KeyError:
13337                         writemsg_level("!!! Missing cache entry: %s\n" % (cpv,),
13338                                 level=logging.ERROR, noiselevel=-1)
13339                         continue
13340                 except CacheError, e:
13341                         writemsg_level("!!! Unable to access cache entry: %s %s\n" % \
13342                                 (cpv, e), level=logging.ERROR, noiselevel=-1)
13343                         continue
13344
13345                 if eb_mtime is None:
13346                         writemsg_level("!!! Missing ebuild mtime: %s\n" % (cpv,),
13347                                 level=logging.ERROR, noiselevel=-1)
13348                         continue
13349
13350                 try:
13351                         eb_mtime = long(eb_mtime)
13352                 except ValueError:
13353                         writemsg_level("!!! Invalid ebuild mtime: %s %s\n" % \
13354                                 (cpv, eb_mtime), level=logging.ERROR, noiselevel=-1)
13355                         continue
13356
13357                 if ec_mtimes is None:
13358                         writemsg_level("!!! Missing eclass mtimes: %s\n" % (cpv,),
13359                                 level=logging.ERROR, noiselevel=-1)
13360                         continue
13361
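                      # Skip entries that inherit a locally modified eclass.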
13362                 if modified_eclasses.intersection(ec_mtimes):
13363                         continue
13364
13365                 missing_eclasses = set(ec_mtimes).difference(ec_names)
13366                 if missing_eclasses:
13367                         writemsg_level("!!! Non-existent eclass(es): %s %s\n" % \
13368                                 (cpv, sorted(missing_eclasses)), level=logging.ERROR,
13369                                 noiselevel=-1)
13370                         continue
13371
13372                 eb_path = os.path.join(portdir, relative_eb_path)
13373                 try:
13374                         current_eb_mtime = long(os.stat(eb_path).st_mtime)
13375                 except OSError:
13376                         writemsg_level("!!! Missing ebuild: %s\n" % \
13377                                 (cpv,), level=logging.ERROR, noiselevel=-1)
13378                         continue
13379
13380                 inconsistent = False
13381                 for ec, (ec_path, ec_mtime) in ec_mtimes.iteritems():
13382                         updated_mtime = updated_ec_mtimes.get(ec)
13383                         if updated_mtime is not None and updated_mtime != ec_mtime:
13384                                 writemsg_level("!!! Inconsistent eclass mtime: %s %s\n" % \
13385                                         (cpv, ec), level=logging.ERROR, noiselevel=-1)
13386                                 inconsistent = True
13387                                 break
13388
13389                 if inconsistent:
13390                         continue
13391
13392                 if current_eb_mtime != eb_mtime:
13393                         os.utime(eb_path, (eb_mtime, eb_mtime))
13394
13395                 for ec, (ec_path, ec_mtime) in ec_mtimes.iteritems():
13396                         if ec in updated_ec_mtimes:
13397                                 continue
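                              # Ignore the eclass path recorded in the cache entry and
                              # use the eclass from this tree's eclass directory instead.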
13398                         ec_path = os.path.join(ec_dir, ec + ".eclass")
13399                         current_mtime = long(os.stat(ec_path).st_mtime)
13400                         if current_mtime != ec_mtime:
13401                                 os.utime(ec_path, (ec_mtime, ec_mtime))
13402                         updated_ec_mtimes[ec] = ec_mtime
13403
13404         return os.EX_OK
13405
13406 def action_metadata(settings, portdb, myopts, porttrees=None):
13407         if porttrees is None:
13408                 porttrees = portdb.porttrees
13409         portage.writemsg_stdout("\n>>> Updating Portage cache\n")
13410         old_umask = os.umask(0002)
13411         cachedir = os.path.normpath(settings.depcachedir)
13412         if cachedir in ["/",    "/bin", "/dev",  "/etc",  "/home",
13413                                         "/lib", "/opt", "/proc", "/root", "/sbin",
13414                                         "/sys", "/tmp", "/usr",  "/var"]:
13415                 print >> sys.stderr, "!!! PORTAGE_DEPCACHEDIR IS SET TO A PRIMARY " + \
13416                         "ROOT DIRECTORY ON YOUR SYSTEM."
13417                 print >> sys.stderr, \
13418                         "!!! This is ALMOST CERTAINLY NOT what you want: '%s'" % cachedir
13419                 sys.exit(73)
13420         if not os.path.exists(cachedir):
13421                 os.makedirs(cachedir)
13422
13423         auxdbkeys = [x for x in portage.auxdbkeys if not x.startswith("UNUSED_0")]
13424         auxdbkeys = tuple(auxdbkeys)
13425
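              # Per-tree state: the source cache to read from, the destination
              # auxdb to write to, the tree's eclass database, and the set of
              # cpvs that are still valid after this pass.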
13426         class TreeData(object):
13427                 __slots__ = ('dest_db', 'eclass_db', 'path', 'src_db', 'valid_nodes')
13428                 def __init__(self, dest_db, eclass_db, path, src_db):
13429                         self.dest_db = dest_db
13430                         self.eclass_db = eclass_db
13431                         self.path = path
13432                         self.src_db = src_db
13433                         self.valid_nodes = set()
13434
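              # For each tree, prefer a pregenerated cache as the source and fall
              # back to metadata/cache when it exists; trees without a usable
              # source cache are skipped entirely.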
13435         porttrees_data = []
13436         for path in porttrees:
13437                 src_db = portdb._pregen_auxdb.get(path)
13438                 if src_db is None and \
13439                         os.path.isdir(os.path.join(path, 'metadata', 'cache')):
13440                         src_db = portdb.metadbmodule(
13441                                 path, 'metadata/cache', auxdbkeys, readonly=True)
13442                         try:
13443                                 src_db.ec = portdb._repo_info[path].eclass_db
13444                         except AttributeError:
13445                                 pass
13446
13447                 if src_db is not None:
13448                         porttrees_data.append(TreeData(portdb.auxdb[path],
13449                                 portdb._repo_info[path].eclass_db, path, src_db))
13450
13451         porttrees = [tree_data.path for tree_data in porttrees_data]
13452
13453         isatty = sys.stdout.isatty()
13454         quiet = not isatty or '--quiet' in myopts
13455         onProgress = None
13456         if not quiet:
13457                 progressBar = portage.output.TermProgressBar()
13458                 progressHandler = ProgressHandler()
13459                 onProgress = progressHandler.onProgress
13460                 def display():
13461                         progressBar.set(progressHandler.curval, progressHandler.maxval)
13462                 progressHandler.display = display
13463                 def sigwinch_handler(signum, frame):
13464                         lines, progressBar.term_columns = \
13465                                 portage.output.get_term_size()
13466                 signal.signal(signal.SIGWINCH, sigwinch_handler)
13467
13468         # Temporarily override portdb.porttrees so portdb.cp_all()
13469         # will only return the relevant subset.
13470         portdb_porttrees = portdb.porttrees
13471         portdb.porttrees = porttrees
13472         try:
13473                 cp_all = portdb.cp_all()
13474         finally:
13475                 portdb.porttrees = portdb_porttrees
13476
13477         curval = 0
13478         maxval = len(cp_all)
13479         if onProgress is not None:
13480                 onProgress(maxval, curval)
13481
13482         from portage.cache.util import quiet_mirroring
13483         from portage import eapi_is_supported, \
13484                 _validate_cache_for_unsupported_eapis
13485
13486         # TODO: Display error messages, but do not interfere with the progress bar.
13487         # Here's how:
13488         #  1) erase the progress bar
13489         #  2) show the error message
13490         #  3) redraw the progress bar on a new line
13491         noise = quiet_mirroring()
13492
13493         for cp in cp_all:
13494                 for tree_data in porttrees_data:
13495                         for cpv in portdb.cp_list(cp, mytree=tree_data.path):
13496                                 tree_data.valid_nodes.add(cpv)
13497                                 try:
13498                                         src = tree_data.src_db[cpv]
13499                                 except KeyError, e:
13500                                         noise.missing_entry(cpv)
13501                                         del e
13502                                         continue
13503                                 except CacheError, ce:
13504                                         noise.exception(cpv, ce)
13505                                         del ce
13506                                         continue
13507
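                                      # An unset EAPI means EAPI 0, and a leading '-' marks an
                                      # entry that was previously written for an unsupported
                                      # EAPI (see the stub entries generated below).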
13508                                 eapi = src.get('EAPI')
13509                                 if not eapi:
13510                                         eapi = '0'
13511                                 eapi = eapi.lstrip('-')
13512                                 eapi_supported = eapi_is_supported(eapi)
13513                                 if not eapi_supported:
13514                                         if not _validate_cache_for_unsupported_eapis:
13515                                                 noise.misc(cpv, "unable to validate " + \
13516                                                         "cache for EAPI='%s'" % eapi)
13517                                                 continue
13518
13519                                 dest = None
13520                                 try:
13521                                         dest = tree_data.dest_db[cpv]
13522                                 except (KeyError, CacheError):
13523                                         pass
13524
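                                      # Normalize EAPI so that an empty value and '0' compare
                                      # as equal when deciding whether dest matches src.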
13525                                 for d in (src, dest):
13526                                         if d is not None and d.get('EAPI') in ('', '0'):
13527                                                 del d['EAPI']
13528
13529                                 if dest is not None:
13530                                         if not (dest['_mtime_'] == src['_mtime_'] and \
13531                                                 tree_data.eclass_db.is_eclass_data_valid(
13532                                                         dest['_eclasses_']) and \
13533                                                 set(dest['_eclasses_']) == set(src['_eclasses_'])):
13534                                                 dest = None
13535                                         else:
13536                                                 # We don't want to skip the write unless we're really
13537                                                 # sure that the existing cache is identical, so don't
13538                                                 # trust _mtime_ and _eclasses_ alone.
13539                                                 for k in set(chain(src, dest)).difference(
13540                                                         ('_mtime_', '_eclasses_')):
13541                                                         if dest.get(k, '') != src.get(k, ''):
13542                                                                 dest = None
13543                                                                 break
13544
13545                                 if dest is not None:
13546                                         # The existing data is valid and identical,
13547                                         # so there's no need to overwrite it.
13548                                         continue
13549
13550                                 try:
13551                                         inherited = src.get('INHERITED', '')
13552                                         eclasses = src.get('_eclasses_')
13553                                 except CacheError, ce:
13554                                         noise.exception(cpv, ce)
13555                                         del ce
13556                                         continue
13557
13558                                 if eclasses is not None:
13559                                         if not tree_data.eclass_db.is_eclass_data_valid(
13560                                                 src['_eclasses_']):
13561                                                 noise.eclass_stale(cpv)
13562                                                 continue
13563                                         inherited = eclasses
13564                                 else:
13565                                         inherited = inherited.split()
13566
13567                                 if tree_data.src_db.complete_eclass_entries and \
13568                                         eclasses is None:
13569                                         noise.corruption(cpv, "missing _eclasses_ field")
13570                                         continue
13571
13572                                 if inherited:
13573                                         # Even if _eclasses_ already exists, replace it with data from
13574                                         # eclass_cache, in order to insert local eclass paths.
13575                                         try:
13576                                                 eclasses = tree_data.eclass_db.get_eclass_data(inherited)
13577                                         except KeyError:
13578                                                 # INHERITED contains a non-existent eclass.
13579                                                 noise.eclass_stale(cpv)
13580                                                 continue
13581
13582                                         if eclasses is None:
13583                                                 noise.eclass_stale(cpv)
13584                                                 continue
13585                                         src['_eclasses_'] = eclasses
13586                                 else:
13587                                         src['_eclasses_'] = {}
13588
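                                      # For unsupported EAPIs, write only a stub entry that
                                      # records the EAPI (marked with a leading '-') together
                                      # with the mtime and eclass data used for validation.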
13589                                 if not eapi_supported:
13590                                         src = {
13591                                                 'EAPI'       : '-' + eapi,
13592                                                 '_mtime_'    : src['_mtime_'],
13593                                                 '_eclasses_' : src['_eclasses_'],
13594                                         }
13595
13596                                 try:
13597                                         tree_data.dest_db[cpv] = src
13598                                 except CacheError, ce:
13599                                         noise.exception(cpv, ce)
13600                                         del ce
13601
13602                 curval += 1
13603                 if onProgress is not None:
13604                         onProgress(maxval, curval)
13605
13606         if onProgress is not None:
13607                 onProgress(maxval, curval)
13608
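              # Remove cache entries for packages that no longer exist in the
              # corresponding tree.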
13609         for tree_data in porttrees_data:
13610                 try:
13611                         dead_nodes = set(tree_data.dest_db.iterkeys())
13612                 except CacheError, e:
13613                         writemsg_level("Error listing cache entries for " + \
13614                                 "'%s': %s, continuing...\n" % (tree_data.path, e),
13615                                 level=logging.ERROR, noiselevel=-1)
13616                         del e
13617                 else:
13618                         dead_nodes.difference_update(tree_data.valid_nodes)
13619                         for cpv in dead_nodes:
13620                                 try:
13621                                         del tree_data.dest_db[cpv]
13622                                 except (KeyError, CacheError):
13623                                         pass
13624
13625         if not quiet:
13626                 # make sure the final progress is displayed
13627                 progressHandler.display()
13628                 print
13629                 signal.signal(signal.SIGWINCH, signal.SIG_DFL)
13630
13631         sys.stdout.flush()
13632         os.umask(old_umask)
13633
13634 def action_regen(settings, portdb, max_jobs, max_load):
13635         xterm_titles = "notitles" not in settings.features
13636         emergelog(xterm_titles, " === regen")
13637         #regenerate cache entries
13638         portage.writemsg_stdout("Regenerating cache entries...\n")
13639         try:
13640                 os.close(sys.stdin.fileno())
13641         except SystemExit, e:
13642                 raise # Needed else can't exit
13643         except:
13644                 pass
13645         sys.stdout.flush()
13646
13647         regen = MetadataRegen(portdb, max_jobs=max_jobs, max_load=max_load)
13648         regen.run()
13649
13650         portage.writemsg_stdout("done!\n")
13651         return regen.returncode
13652
13653 def action_config(settings, trees, myopts, myfiles):
13654         if len(myfiles) != 1:
13655                 print red("!!! config can only take a single package atom at this time\n")
13656                 sys.exit(1)
13657         if not is_valid_package_atom(myfiles[0]):
13658                 portage.writemsg("!!! '%s' is not a valid package atom.\n" % myfiles[0],
13659                         noiselevel=-1)
13660                 portage.writemsg("!!! Please check ebuild(5) for full details.\n")
13661                 portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
13662                 sys.exit(1)
13663         print
13664         try:
13665                 pkgs = trees[settings["ROOT"]]["vartree"].dbapi.match(myfiles[0])
13666         except portage.exception.AmbiguousPackageName, e:
13667                 # Multiple matches thrown from cpv_expand
13668                 pkgs = e.args[0]
13669         if len(pkgs) == 0:
13670                 print "No packages found.\n"
13671                 sys.exit(0)
13672         elif len(pkgs) > 1:
13673                 if "--ask" in myopts:
13674                         options = []
13675                         print "Please select a package to configure:"
13676                         idx = 0
13677                         for pkg in pkgs:
13678                                 idx += 1
13679                                 options.append(str(idx))
13680                                 print options[-1]+") "+pkg
13681                         print "X) Cancel"
13682                         options.append("X")
13683                         idx = userquery("Selection?", options)
13684                         if idx == "X":
13685                                 sys.exit(0)
13686                         pkg = pkgs[int(idx)-1]
13687                 else:
13688                         print "The following packages are available:"
13689                         for pkg in pkgs:
13690                                 print "* "+pkg
13691                         print "\nPlease use a specific atom or the --ask option."
13692                         sys.exit(1)
13693         else:
13694                 pkg = pkgs[0]
13695
13696         print
13697         if "--ask" in myopts:
13698                 if userquery("Ready to configure "+pkg+"?") == "No":
13699                         sys.exit(0)
13700         else:
13701                 print "Configuring %s..." % pkg
13702         print
13703         ebuildpath = trees[settings["ROOT"]]["vartree"].dbapi.findname(pkg)
13704         mysettings = portage.config(clone=settings)
13705         vardb = trees[mysettings["ROOT"]]["vartree"].dbapi
13706         debug = mysettings.get("PORTAGE_DEBUG") == "1"
13707         retval = portage.doebuild(ebuildpath, "config", mysettings["ROOT"],
13708                 mysettings,
13709                 debug=debug, cleanup=True,
13710                 mydbapi=trees[settings["ROOT"]]["vartree"].dbapi, tree="vartree")
13711         if retval == os.EX_OK:
13712                 portage.doebuild(ebuildpath, "clean", mysettings["ROOT"],
13713                         mysettings, debug=debug, mydbapi=vardb, tree="vartree")
13714         print
13715
13716 def action_info(settings, trees, myopts, myfiles):
13717         print getportageversion(settings["PORTDIR"], settings["ROOT"],
13718                 settings.profile_path, settings["CHOST"],
13719                 trees[settings["ROOT"]]["vartree"].dbapi)
13720         header_width = 65
13721         header_title = "System Settings"
13722         if myfiles:
13723                 print header_width * "="
13724                 print header_title.rjust(int(header_width/2 + len(header_title)/2))
13725         print header_width * "="
13726         print "System uname: "+platform.platform(aliased=1)
13727
13728         lastSync = portage.grabfile(os.path.join(
13729                 settings["PORTDIR"], "metadata", "timestamp.chk"))
13730         print "Timestamp of tree:",
13731         if lastSync:
13732                 print lastSync[0]
13733         else:
13734                 print "Unknown"
13735
13736         output=commands.getstatusoutput("distcc --version")
13737         if not output[0]:
13738                 print str(output[1].split("\n",1)[0]),
13739                 if "distcc" in settings.features:
13740                         print "[enabled]"
13741                 else:
13742                         print "[disabled]"
13743
13744         output=commands.getstatusoutput("ccache -V")
13745         if not output[0]:
13746                 print str(output[1].split("\n",1)[0]),
13747                 if "ccache" in settings.features:
13748                         print "[enabled]"
13749                 else:
13750                         print "[disabled]"
13751
13752         myvars  = ["sys-devel/autoconf", "sys-devel/automake", "virtual/os-headers",
13753                    "sys-devel/binutils", "sys-devel/libtool",  "dev-lang/python"]
13754         myvars += portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_pkgs")
13755         myvars  = portage.util.unique_array(myvars)
13756         myvars.sort()
13757
13758         for x in myvars:
13759                 if portage.isvalidatom(x):
13760                         pkg_matches = trees["/"]["vartree"].dbapi.match(x)
13761                         pkg_matches = [portage.catpkgsplit(cpv)[1:] for cpv in pkg_matches]
13762                         pkg_matches.sort(key=cmp_sort_key(portage.pkgcmp))
13763                         pkgs = []
13764                         for pn, ver, rev in pkg_matches:
13765                                 if rev != "r0":
13766                                         pkgs.append(ver + "-" + rev)
13767                                 else:
13768                                         pkgs.append(ver)
13769                         if pkgs:
13770                                 pkgs = ", ".join(pkgs)
13771                                 print "%-20s %s" % (x+":", pkgs)
13772                 else:
13773                         print "%-20s %s" % (x+":", "[NOT VALID]")
13774
13775         libtool_vers = ",".join(trees["/"]["vartree"].dbapi.match("sys-devel/libtool"))
13776
13777         if "--verbose" in myopts:
13778                 myvars=settings.keys()
13779         else:
13780                 myvars = ['GENTOO_MIRRORS', 'CONFIG_PROTECT', 'CONFIG_PROTECT_MASK',
13781                           'PORTDIR', 'DISTDIR', 'PKGDIR', 'PORTAGE_TMPDIR',
13782                           'PORTDIR_OVERLAY', 'USE', 'CHOST', 'CFLAGS', 'CXXFLAGS',
13783                           'ACCEPT_KEYWORDS', 'SYNC', 'FEATURES', 'EMERGE_DEFAULT_OPTS']
13784
13785                 myvars.extend(portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_vars"))
13786
13787         myvars = portage.util.unique_array(myvars)
13788         use_expand = settings.get('USE_EXPAND', '').split()
13789         use_expand.sort()
13790         use_expand_hidden = set(
13791                 settings.get('USE_EXPAND_HIDDEN', '').upper().split())
13792         alphabetical_use = '--alphabetical' in myopts
13793         root_config = trees[settings["ROOT"]]['root_config']
13794         unset_vars = []
13795         myvars.sort()
13796         for x in myvars:
13797                 if x in settings:
13798                         if x != "USE":
13799                                 print '%s="%s"' % (x, settings[x])
13800                         else:
13801                                 use = set(settings["USE"].split())
13802                                 for varname in use_expand:
13803                                         flag_prefix = varname.lower() + "_"
13804                                         for f in list(use):
13805                                                 if f.startswith(flag_prefix):
13806                                                         use.remove(f)
13807                                 use = list(use)
13808                                 use.sort()
13809                                 print 'USE="%s"' % " ".join(use),
13810                                 for varname in use_expand:
13811                                         myval = settings.get(varname)
13812                                         if myval:
13813                                                 print '%s="%s"' % (varname, myval),
13814                                 print
13815                 else:
13816                         unset_vars.append(x)
13817         if unset_vars:
13818                 print "Unset:  "+", ".join(unset_vars)
13819         print
13820
13821         if "--debug" in myopts:
13822                 for x in dir(portage):
13823                         module = getattr(portage, x)
13824                         if "cvs_id_string" in dir(module):
13825                                 print "%s: %s" % (str(x), str(module.cvs_id_string))
13826
13827         # See if we can find any packages installed matching the strings
13828         # passed on the command line
13829         mypkgs = []
13830         vardb = trees[settings["ROOT"]]["vartree"].dbapi
13831         portdb = trees[settings["ROOT"]]["porttree"].dbapi
13832         for x in myfiles:
13833                 mypkgs.extend(vardb.match(x))
13834
13835         # If some packages were found...
13836         if mypkgs:
13837                 # Get our global settings (we only print stuff if it varies from
13838                 # the current config)
13839                 mydesiredvars = [ 'CHOST', 'CFLAGS', 'CXXFLAGS', 'LDFLAGS' ]
13840                 auxkeys = mydesiredvars + list(vardb._aux_cache_keys)
13841                 auxkeys.append('DEFINED_PHASES')
13842                 global_vals = {}
13843                 pkgsettings = portage.config(clone=settings)
13844
13845                 for myvar in mydesiredvars:
13846                         global_vals[myvar] = set(settings.get(myvar, "").split())
13847
13848                 # Loop through each package
13849                 # Only print settings if they differ from global settings
13850                 header_title = "Package Settings"
13851                 print header_width * "="
13852                 print header_title.rjust(int(header_width/2 + len(header_title)/2))
13853                 print header_width * "="
13854                 from portage.output import EOutput
13855                 out = EOutput()
13856                 for cpv in mypkgs:
13857                         # Get all package specific variables
13858                         metadata = dict(izip(auxkeys, vardb.aux_get(cpv, auxkeys)))
13859                         pkg = Package(built=True, cpv=cpv,
13860                                 installed=True, metadata=izip(Package.metadata_keys,
13861                                 (metadata.get(x, '') for x in Package.metadata_keys)),
13862                                 root_config=root_config, type_name='installed')
13863                         valuesmap = {}
13864                         for k in auxkeys:
13865                                 valuesmap[k] = set(metadata[k].split())
13866
13867                         diff_values = {}
13868                         for myvar in mydesiredvars:
13869                                 # If the package variable doesn't match the
13870                                 # current global variable, something has changed,
13871                                 # so record it in diff_values so we know to print it
13872                                 if valuesmap[myvar] != global_vals[myvar]:
13873                                         diff_values[myvar] = valuesmap[myvar]
13874
13875                         print "\n%s was built with the following:" % \
13876                                 colorize("INFORM", str(pkg.cpv))
13877
13878                         pkgsettings.setcpv(pkg)
13879                         forced_flags = set(chain(pkgsettings.useforce,
13880                                 pkgsettings.usemask))
13881                         use = set(pkg.use.enabled)
13882                         use.discard(pkgsettings.get('ARCH'))
13883                         use_expand_flags = set()
13884                         use_enabled = {}
13885                         use_disabled = {}
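                              # Sort each flag into its USE_EXPAND variable bucket; whatever
                              # is left over is reported under plain USE.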
13886                         for varname in use_expand:
13887                                 flag_prefix = varname.lower() + "_"
13888                                 for f in use:
13889                                         if f.startswith(flag_prefix):
13890                                                 use_expand_flags.add(f)
13891                                                 use_enabled.setdefault(
13892                                                         varname.upper(), []).append(f[len(flag_prefix):])
13893
13894                                 for f in pkg.iuse.all:
13895                                         if f.startswith(flag_prefix):
13896                                                 use_expand_flags.add(f)
13897                                                 if f not in use:
13898                                                         use_disabled.setdefault(
13899                                                                 varname.upper(), []).append(f[len(flag_prefix):])
13900
13901                         var_order = set(use_enabled)
13902                         var_order.update(use_disabled)
13903                         var_order = sorted(var_order)
13904                         var_order.insert(0, 'USE')
13905                         use.difference_update(use_expand_flags)
13906                         use_enabled['USE'] = list(use)
13907                         use_disabled['USE'] = []
13908
13909                         for f in pkg.iuse.all:
13910                                 if f not in use and \
13911                                         f not in use_expand_flags:
13912                                         use_disabled['USE'].append(f)
13913
13914                         for varname in var_order:
13915                                 if varname in use_expand_hidden:
13916                                         continue
13917                                 flags = []
13918                                 for f in use_enabled.get(varname, []):
13919                                         flags.append(UseFlagDisplay(f, True, f in forced_flags))
13920                                 for f in use_disabled.get(varname, []):
13921                                         flags.append(UseFlagDisplay(f, False, f in forced_flags))
13922                                 if alphabetical_use:
13923                                         flags.sort(key=UseFlagDisplay.sort_combined)
13924                                 else:
13925                                         flags.sort(key=UseFlagDisplay.sort_separated)
13926                                 print '%s="%s"' % (varname, ' '.join(str(f) for f in flags)),
13927                         print
13928
13929                         # If a difference was found, print the info for
13930                         # this package.
13931                         if diff_values:
13932                                 # Print package info
13933                                 for myvar in mydesiredvars:
13934                                         if myvar in diff_values:
13935                                                 mylist = list(diff_values[myvar])
13936                                                 mylist.sort()
13937                                                 print "%s=\"%s\"" % (myvar, " ".join(mylist))
13938                         print
13939
13940                         if metadata['DEFINED_PHASES']:
13941                                 if 'info' not in metadata['DEFINED_PHASES'].split():
13942                                         continue
13943
13944                         print ">>> Attempting to run pkg_info() for '%s'" % pkg.cpv
13945                         ebuildpath = vardb.findname(pkg.cpv)
13946                         if not ebuildpath or not os.path.exists(ebuildpath):
13947                                 out.ewarn("No ebuild found for '%s'" % pkg.cpv)
13948                                 continue
13949                         portage.doebuild(ebuildpath, "info", pkgsettings["ROOT"],
13950                                 pkgsettings, debug=(settings.get("PORTAGE_DEBUG", "") == "1"),
13951                                 mydbapi=trees[settings["ROOT"]]["vartree"].dbapi,
13952                                 tree="vartree")
13953
13954 def action_search(root_config, myopts, myfiles, spinner):
13955         if not myfiles:
13956                 print "emerge: no search terms provided."
13957         else:
13958                 searchinstance = search(root_config,
13959                         spinner, "--searchdesc" in myopts,
13960                         "--quiet" not in myopts, "--usepkg" in myopts,
13961                         "--usepkgonly" in myopts)
13962                 for mysearch in myfiles:
13963                         try:
13964                                 searchinstance.execute(mysearch)
13965                         except re.error, comment:
13966                                 print "\n!!! Regular expression error in \"%s\": %s" % ( mysearch, comment )
13967                                 sys.exit(1)
13968                         searchinstance.output()
13969
13970 def action_uninstall(settings, trees, ldpath_mtimes,
13971         opts, action, files, spinner):
13972
13973         # For backward compat, some actions do not require leading '='.
13974         ignore_missing_eq = action in ('clean', 'unmerge')
13975         root = settings['ROOT']
13976         vardb = trees[root]['vartree'].dbapi
13977         valid_atoms = []
13978         lookup_owners = []
13979
13980         # Ensure atoms are valid before calling unmerge().
13981         # For backward compat, leading '=' is not required.
13982         for x in files:
13983                 if is_valid_package_atom(x) or \
13984                         (ignore_missing_eq and is_valid_package_atom('=' + x)):
13985
13986                         try:
13987                                 valid_atoms.append(
13988                                         portage.dep_expand(x, mydb=vardb, settings=settings))
13989                         except portage.exception.AmbiguousPackageName, e:
13990                                 msg = "The short ebuild name \"" + x + \
13991                                         "\" is ambiguous.  Please specify " + \
13992                                         "one of the following " + \
13993                                         "fully-qualified ebuild names instead:"
13994                                 for line in textwrap.wrap(msg, 70):
13995                                         writemsg_level("!!! %s\n" % (line,),
13996                                                 level=logging.ERROR, noiselevel=-1)
13997                                 for i in e[0]:
13998                                         writemsg_level("    %s\n" % colorize("INFORM", i),
13999                                                 level=logging.ERROR, noiselevel=-1)
14000                                 writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
14001                                 return 1
14002
14003                 elif x.startswith(os.sep):
14004                         if not x.startswith(root):
14005                                 writemsg_level(("!!! '%s' does not start with" + \
14006                                         " $ROOT.\n") % x, level=logging.ERROR, noiselevel=-1)
14007                                 return 1
14008                         # Queue these up since it's most efficient to handle
14009                         # multiple files in a single iter_owners() call.
14010                         lookup_owners.append(x)
14011
14012                 else:
14013                         msg = []
14014                         msg.append("'%s' is not a valid package atom." % (x,))
14015                         msg.append("Please check ebuild(5) for full details.")
14016                         writemsg_level("".join("!!! %s\n" % line for line in msg),
14017                                 level=logging.ERROR, noiselevel=-1)
14018                         return 1
14019
14020         if lookup_owners:
14021                 relative_paths = []
14022                 search_for_multiple = False
14023                 if len(lookup_owners) > 1:
14024                         search_for_multiple = True
14025
14026                 for x in lookup_owners:
14027                         if not search_for_multiple and os.path.isdir(x):
14028                                 search_for_multiple = True
14029                         relative_paths.append(x[len(root):])
14030
14031                 owners = set()
14032                 for pkg, relative_path in \
14033                         vardb._owners.iter_owners(relative_paths):
14034                         owners.add(pkg.mycpv)
14035                         if not search_for_multiple:
14036                                 break
14037
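                      # Convert each owning package into a slot atom (or a plain
                      # cp atom when SLOT is unset) and add it to the atoms to act on.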
14038                 if owners:
14039                         for cpv in owners:
14040                                 slot = vardb.aux_get(cpv, ['SLOT'])[0]
14041                                 if not slot:
14042                                         # portage now masks packages with missing slot, but it's
14043                                         # possible that one was installed by an older version
14044                                         atom = portage.cpv_getkey(cpv)
14045                                 else:
14046                                         atom = '%s:%s' % (portage.cpv_getkey(cpv), slot)
14047                                 valid_atoms.append(portage.dep.Atom(atom))
14048                 else:
14049                         writemsg_level(("!!! '%s' is not claimed " + \
14050                                 "by any package.\n") % lookup_owners[0],
14051                                 level=logging.WARNING, noiselevel=-1)
14052
14053         if files and not valid_atoms:
14054                 return 1
14055
14056         if action in ('clean', 'unmerge') or \
14057                 (action == 'prune' and "--nodeps" in opts):
14058                 # When given a list of atoms, unmerge them in the order given.
14059                 ordered = action == 'unmerge'
14060                 unmerge(trees[settings["ROOT"]]['root_config'], opts, action,
14061                         valid_atoms, ldpath_mtimes, ordered=ordered)
14062                 rval = os.EX_OK
14063         elif action == 'deselect':
14064                 rval = action_deselect(settings, trees, opts, valid_atoms)
14065         else:
14066                 rval = action_depclean(settings, trees, ldpath_mtimes,
14067                         opts, action, valid_atoms, spinner)
14068
14069         return rval
14070
14071 def action_deselect(settings, trees, opts, atoms):
14072         root_config = trees[settings['ROOT']]['root_config']
14073         world_set = root_config.sets['world']
14074         if not hasattr(world_set, 'update'):
14075                 writemsg_level("World set does not appear to be mutable.\n",
14076                         level=logging.ERROR, noiselevel=-1)
14077                 return 1
14078
14079         vardb = root_config.trees['vartree'].dbapi
14080         expanded_atoms = set(atoms)
14081         from portage.dep import Atom
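              # Expand each argument atom to slot-specific atoms for every
              # installed match so that slotted entries in the world file are
              # matched as well.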
14082         for atom in atoms:
14083                 for cpv in vardb.match(atom):
14084                         slot, = vardb.aux_get(cpv, ['SLOT'])
14085                         if not slot:
14086                                 slot = '0'
14087                         expanded_atoms.add(Atom('%s:%s' % (portage.cpv_getkey(cpv), slot)))
14088
14089         pretend = '--pretend' in opts
14090         locked = False
14091         if not pretend and hasattr(world_set, 'lock'):
14092                 world_set.lock()
14093                 locked = True
14094         try:
14095                 discard_atoms = set()
14096                 world_set.load()
14097                 for atom in world_set:
14098                         if not isinstance(atom, Atom):
14099                                 # nested set
14100                                 continue
14101                         for arg_atom in expanded_atoms:
14102                                 if arg_atom.intersects(atom) and \
14103                                         not (arg_atom.slot and not atom.slot):
14104                                         discard_atoms.add(atom)
14105                                         break
14106                 if discard_atoms:
14107                         for atom in sorted(discard_atoms):
14108                                 print ">>> Removing %s from \"world\" favorites file..." % \
14109                                         colorize("INFORM", str(atom))
14110
14111                         if '--ask' in opts:
14112                                 prompt = "Would you like to remove these " + \
14113                                         "packages from your world favorites?"
14114                                 if userquery(prompt) == 'No':
14115                                         return os.EX_OK
14116
14117                         remaining = set(world_set)
14118                         remaining.difference_update(discard_atoms)
14119                         if not pretend:
14120                                 world_set.replace(remaining)
14121                 else:
14122                         print ">>> No matching atoms found in \"world\" favorites file..."
14123         finally:
14124                 if locked:
14125                         world_set.unlock()
14126         return os.EX_OK
14127
14128 def action_depclean(settings, trees, ldpath_mtimes,
14129         myopts, action, myfiles, spinner):
14130         # Kill packages that aren't explicitly merged or are required as a
14131         # dependency of another package. World file is explicit.
14132
14133         # Global depclean or prune operations are not very safe when there are
14134         # missing dependencies since it's unknown how badly incomplete
14135         # the dependency graph is, and we might accidentally remove packages
14136         # that should have been pulled into the graph. On the other hand, it's
14137         # relatively safe to ignore missing deps when only asked to remove
14138         # specific packages.
14139         allow_missing_deps = len(myfiles) > 0
14140
14141         msg = []
14142         msg.append("Always study the list of packages to be cleaned for any obvious\n")
14143         msg.append("mistakes. Packages that are part of the world set will always\n")
14144         msg.append("be kept.  They can be manually added to this set with\n")
14145         msg.append(good("`emerge --noreplace <atom>`") + ".  Packages that are listed in\n")
14146         msg.append("package.provided (see portage(5)) will be removed by\n")
14147         msg.append("depclean, even if they are part of the world set.\n")
14148         msg.append("\n")
14149         msg.append("As a safety measure, depclean will not remove any packages\n")
14150         msg.append("unless *all* required dependencies have been resolved.  As a\n")
14151         msg.append("consequence, it is often necessary to run %s\n" % \
14152                 good("`emerge --update"))
14153         msg.append(good("--newuse --deep @system @world`") + \
14154                 " prior to depclean.\n")
14155
14156         if action == "depclean" and "--quiet" not in myopts and not myfiles:
14157                 portage.writemsg_stdout("\n")
14158                 for x in msg:
14159                         portage.writemsg_stdout(colorize("WARN", " * ") + x)
14160
14161         xterm_titles = "notitles" not in settings.features
14162         myroot = settings["ROOT"]
14163         root_config = trees[myroot]["root_config"]
14164         getSetAtoms = root_config.setconfig.getSetAtoms
14165         vardb = trees[myroot]["vartree"].dbapi
14166         deselect = myopts.get('--deselect') != 'n'
14167
14168         required_set_names = ("system", "world")
14169         required_sets = {}
14170         set_args = []
14171
14172         for s in required_set_names:
14173                 required_sets[s] = InternalPackageSet(
14174                         initial_atoms=getSetAtoms(s))
14175
14176         
14177         # When removing packages, use a temporary version of world
14178         # which excludes packages that are intended to be eligible for
14179         # removal.
14180         world_temp_set = required_sets["world"]
14181         system_set = required_sets["system"]
14182
14183         if not system_set or not world_temp_set:
14184
14185                 if not system_set:
14186                         writemsg_level("!!! You have no system list.\n",
14187                                 level=logging.ERROR, noiselevel=-1)
14188
14189                 if not world_temp_set:
14190                         writemsg_level("!!! You have no world file.\n",
14191                                         level=logging.WARNING, noiselevel=-1)
14192
14193                 writemsg_level("!!! Proceeding is likely to " + \
14194                         "break your installation.\n",
14195                         level=logging.WARNING, noiselevel=-1)
14196                 if "--pretend" not in myopts:
14197                         countdown(int(settings["EMERGE_WARNING_DELAY"]), ">>> Depclean")
14198
14199         if action == "depclean":
14200                 emergelog(xterm_titles, " >>> depclean")
14201
14202         import textwrap
14203         args_set = InternalPackageSet()
14204         if myfiles:
14205                 args_set.update(myfiles)
14206                 matched_packages = False
14207                 for x in args_set:
14208                         if vardb.match(x):
14209                                 matched_packages = True
14210                                 break
14211                 if not matched_packages:
14212                         writemsg_level(">>> No packages selected for removal by %s\n" % \
14213                                 action)
14214                         return
14215
14216         writemsg_level("\nCalculating dependencies  ")
14217         resolver_params = create_depgraph_params(myopts, "remove")
14218         resolver = depgraph(settings, trees, myopts, resolver_params, spinner)
14219         vardb = resolver.trees[myroot]["vartree"].dbapi
14220
14221         if action == "depclean":
14222
14223                 if args_set:
14224
14225                         if deselect:
14226                                 world_temp_set.clear()
14227
14228                         # Pull in everything that's installed but not matched
14229                         # by an argument atom since we don't want to clean any
14230                         # package if something depends on it.
14231                         for pkg in vardb:
14232                                 spinner.update()
14233
14234                                 try:
14235                                         if args_set.findAtomForPackage(pkg) is None:
14236                                                 world_temp_set.add("=" + pkg.cpv)
14237                                                 continue
14238                                 except portage.exception.InvalidDependString, e:
14239                                         show_invalid_depstring_notice(pkg,
14240                                                 pkg.metadata["PROVIDE"], str(e))
14241                                         del e
14242                                         world_temp_set.add("=" + pkg.cpv)
14243                                         continue
14244
14245         elif action == "prune":
14246
14247                 if deselect:
14248                         world_temp_set.clear()
14249
14250                 # Pull in everything that's installed since we don't want
14251                 # to prune a package if something depends on it.
14252                 world_temp_set.update(vardb.cp_all())
14253
14254                 if not args_set:
14255
14256                         # Try to prune everything that's slotted.
14257                         for cp in vardb.cp_all():
14258                                 if len(vardb.cp_list(cp)) > 1:
14259                                         args_set.add(cp)
14260
14261                 # Remove atoms from world that match installed packages
14262                 # that are also matched by argument atoms, but do not remove
14263                 # them if they match the highest installed version.
14264                 for pkg in vardb:
14265                         spinner.update()
14266                         pkgs_for_cp = vardb.match_pkgs(pkg.cp)
14267                         if not pkgs_for_cp or pkg not in pkgs_for_cp:
14268                                 raise AssertionError("package expected in matches: " + \
14269                                         "cp = %s, cpv = %s matches = %s" % \
14270                                         (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
14271
14272                         highest_version = pkgs_for_cp[-1]
14273                         if pkg == highest_version:
14274                                 # pkg is the highest version
14275                                 world_temp_set.add("=" + pkg.cpv)
14276                                 continue
14277
14278                         if len(pkgs_for_cp) <= 1:
14279                                 raise AssertionError("more packages expected: " + \
14280                                         "cp = %s, cpv = %s matches = %s" % \
14281                                         (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
14282
14283                         try:
14284                                 if args_set.findAtomForPackage(pkg) is None:
14285                                         world_temp_set.add("=" + pkg.cpv)
14286                                         continue
14287                         except portage.exception.InvalidDependString, e:
14288                                 show_invalid_depstring_notice(pkg,
14289                                         pkg.metadata["PROVIDE"], str(e))
14290                                 del e
14291                                 world_temp_set.add("=" + pkg.cpv)
14292                                 continue
14293
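        # Each required set becomes a SetArg whose atoms are pushed onto the
        # resolver's dep stack, so that the _complete_graph() call below pulls
        # the corresponding packages into the dependency graph.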
14294         set_args = {}
14295         for s, package_set in required_sets.iteritems():
14296                 set_atom = SETPREFIX + s
14297                 set_arg = SetArg(arg=set_atom, set=package_set,
14298                         root_config=resolver.roots[myroot])
14299                 set_args[s] = set_arg
14300                 for atom in set_arg.set:
14301                         resolver._dep_stack.append(
14302                                 Dependency(atom=atom, root=myroot, parent=set_arg))
14303                         resolver.digraph.add(set_arg, None)
14304
14305         success = resolver._complete_graph()
14306         writemsg_level("\b\b... done!\n")
14307
14308         resolver.display_problems()
14309
14310         if not success:
14311                 return 1
14312
14313         def unresolved_deps():
14314
14315                 unresolvable = set()
14316                 for dep in resolver._initially_unsatisfied_deps:
14317                         if isinstance(dep.parent, Package) and \
14318                                 (dep.priority > UnmergeDepPriority.SOFT):
14319                                 unresolvable.add((dep.atom, dep.parent.cpv))
14320
14321                 if not unresolvable:
14322                         return False
14323
14324                 if unresolvable and not allow_missing_deps:
14325                         prefix = bad(" * ")
14326                         msg = []
14327                         msg.append("Dependencies could not be completely resolved due to")
14328                         msg.append("the following required packages not being installed:")
14329                         msg.append("")
14330                         for atom, parent in unresolvable:
14331                                 msg.append("  %s pulled in by:" % (atom,))
14332                                 msg.append("    %s" % (parent,))
14333                                 msg.append("")
14334                         msg.append("Have you forgotten to run " + \
14335                                 good("`emerge --update --newuse --deep @system @world`") + " prior")
14336                         msg.append(("to %s? It may be necessary to manually " + \
14337                                 "uninstall packages that no longer") % action)
14338                         msg.append("exist in the portage tree since " + \
14339                                 "it may not be possible to satisfy their")
14340                         msg.append("dependencies.  Also, be aware of " + \
14341                                 "the --with-bdeps option that is documented")
14342                         msg.append("in " + good("`man emerge`") + ".")
14343                         if action == "prune":
14344                                 msg.append("")
14345                                 msg.append("If you would like to ignore " + \
14346                                         "dependencies then use %s." % good("--nodeps"))
14347                         writemsg_level("".join("%s%s\n" % (prefix, line) for line in msg),
14348                                 level=logging.ERROR, noiselevel=-1)
14349                         return True
14350                 return False
14351
14352         if unresolved_deps():
14353                 return 1
14354
14355         graph = resolver.digraph.copy()
14356         required_pkgs_total = 0
14357         for node in graph:
14358                 if isinstance(node, Package):
14359                         required_pkgs_total += 1
14360
14361         def show_parents(child_node):
14362                 parent_nodes = graph.parent_nodes(child_node)
14363                 if not parent_nodes:
14364                         # With --prune, the highest version can be pulled in without any
14365                         # real parent since all installed packages are pulled in.  In that
14366                         # case there's nothing to show here.
14367                         return
14368                 parent_strs = []
14369                 for node in parent_nodes:
14370                         parent_strs.append(str(getattr(node, "cpv", node)))
14371                 parent_strs.sort()
14372                 msg = []
14373                 msg.append("  %s pulled in by:\n" % (child_node.cpv,))
14374                 for parent_str in parent_strs:
14375                         msg.append("    %s\n" % (parent_str,))
14376                 msg.append("\n")
14377                 portage.writemsg_stdout("".join(msg), noiselevel=-1)
14378
14379         def cmp_pkg_cpv(pkg1, pkg2):
14380                 """Sort Package instances by cpv."""
14381                 if pkg1.cpv > pkg2.cpv:
14382                         return 1
14383                 elif pkg1.cpv == pkg2.cpv:
14384                         return 0
14385                 else:
14386                         return -1
14387
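        # cmp_sort_key() wraps an old-style cmp function such as cmp_pkg_cpv
        # above so that it can be passed as the key= argument of sorted(), as
        # in the sorted(vardb, ...) calls below.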
14388         def create_cleanlist():
14389                 pkgs_to_remove = []
14390
14391                 if action == "depclean":
14392                         if args_set:
14393
14394                                 for pkg in sorted(vardb, key=cmp_sort_key(cmp_pkg_cpv)):
14395                                         arg_atom = None
14396                                         try:
14397                                                 arg_atom = args_set.findAtomForPackage(pkg)
14398                                         except portage.exception.InvalidDependString:
14399                                                 # this error has already been displayed by now
14400                                                 continue
14401
14402                                         if arg_atom:
14403                                                 if pkg not in graph:
14404                                                         pkgs_to_remove.append(pkg)
14405                                                 elif "--verbose" in myopts:
14406                                                         show_parents(pkg)
14407
14408                         else:
14409                                 for pkg in sorted(vardb, key=cmp_sort_key(cmp_pkg_cpv)):
14410                                         if pkg not in graph:
14411                                                 pkgs_to_remove.append(pkg)
14412                                         elif "--verbose" in myopts:
14413                                                 show_parents(pkg)
14414
14415                 elif action == "prune":
14416                         # Prune pulls in all installed packages rather than just world,
14417                         # so world isn't a real reverse dependency; don't display it as such.
14418                         graph.remove(set_args["world"])
14419
14420                         for atom in args_set:
14421                                 for pkg in vardb.match_pkgs(atom):
14422                                         if pkg not in graph:
14423                                                 pkgs_to_remove.append(pkg)
14424                                         elif "--verbose" in myopts:
14425                                                 show_parents(pkg)
14426
14427                 if not pkgs_to_remove:
14428                         writemsg_level(
14429                                 ">>> No packages selected for removal by %s\n" % action)
14430                         if "--verbose" not in myopts:
14431                                 writemsg_level(
14432                                         ">>> To see reverse dependencies, use %s\n" % \
14433                                                 good("--verbose"))
14434                         if action == "prune":
14435                                 writemsg_level(
14436                                         ">>> To ignore dependencies, use %s\n" % \
14437                                                 good("--nodeps"))
14438
14439                 return pkgs_to_remove
14440
14441         cleanlist = create_cleanlist()
14442
14443         if len(cleanlist):
14444                 clean_set = set(cleanlist)
14445
14446                 # Check if any of these packages are the sole providers of libraries
14447                 # with consumers that have not been selected for removal. If so, these
14448                 # packages and any dependencies need to be added to the graph.
14449                 real_vardb = trees[myroot]["vartree"].dbapi
14450                 linkmap = real_vardb.linkmap
14451                 liblist = linkmap.listLibraryObjects()
14452                 consumer_cache = {}
14453                 provider_cache = {}
14454                 soname_cache = {}
14455                 consumer_map = {}
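                # Cache layout used below (as built by the loop that follows):
                #   consumer_cache: lib path -> consumers returned by
                #                   linkmap.findConsumers(lib)
                #   provider_cache: consumer path -> mapping returned by
                #                   linkmap.findProviders(consumer), keyed by soname
                #   soname_cache:   lib path -> soname
                #   consumer_map:   candidate pkg -> {lib: [(consumer,
                #                   providers-of-that-soname), ...]}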
14456
14457                 writemsg_level(">>> Checking for lib consumers...\n")
14458
14459                 for pkg in cleanlist:
14460                         pkg_dblink = real_vardb._dblink(pkg.cpv)
14461                         provided_libs = set()
14462
14463                         for lib in liblist:
14464                                 if pkg_dblink.isowner(lib, myroot):
14465                                         provided_libs.add(lib)
14466
14467                         if not provided_libs:
14468                                 continue
14469
14470                         consumers = {}
14471                         for lib in provided_libs:
14472                                 lib_consumers = consumer_cache.get(lib)
14473                                 if lib_consumers is None:
14474                                         lib_consumers = linkmap.findConsumers(lib)
14475                                         consumer_cache[lib] = lib_consumers
14476                                 if lib_consumers:
14477                                         consumers[lib] = lib_consumers
14478
14479                         if not consumers:
14480                                 continue
14481
14482                         for lib, lib_consumers in consumers.items():
14483                                 for consumer_file in list(lib_consumers):
14484                                         if pkg_dblink.isowner(consumer_file, myroot):
14485                                                 lib_consumers.remove(consumer_file)
14486                                 if not lib_consumers:
14487                                         del consumers[lib]
14488
14489                         if not consumers:
14490                                 continue
14491
14492                         for lib, lib_consumers in consumers.iteritems():
14493
14494                                 soname = soname_cache.get(lib)
14495                                 if soname is None:
14496                                         soname = linkmap.getSoname(lib)
14497                                         soname_cache[lib] = soname
14498
14499                                 consumer_providers = []
14500                                 for lib_consumer in lib_consumers:
14501                                         providers = provider_cache.get(lib_consumer)
14502                                         if providers is None:
14503                                                 providers = linkmap.findProviders(lib_consumer)
14504                                                 provider_cache[lib_consumer] = providers
14505                                         if soname not in providers:
14506                                                 # Why does this happen?
14507                                                 continue
14508                                         consumer_providers.append(
14509                                                 (lib_consumer, providers[soname]))
14510
14511                                 consumers[lib] = consumer_providers
14512
14513                         consumer_map[pkg] = consumers
14514
14515                 if consumer_map:
14516
14517                         search_files = set()
14518                         for consumers in consumer_map.itervalues():
14519                                 for lib, consumer_providers in consumers.iteritems():
14520                                         for lib_consumer, providers in consumer_providers:
14521                                                 search_files.add(lib_consumer)
14522                                                 search_files.update(providers)
14523
14524                         writemsg_level(">>> Assigning files to packages...\n")
14525                         file_owners = real_vardb._owners.getFileOwnerMap(search_files)
14526
14527                         for pkg, consumers in consumer_map.items():
14528                                 for lib, consumer_providers in consumers.items():
14529                                         lib_consumers = set()
14530
14531                                         for lib_consumer, providers in consumer_providers:
14532                                                 owner_set = file_owners.get(lib_consumer)
14533                                                 provider_dblinks = set()
14534                                                 provider_pkgs = set()
14535
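                                                # If another installed package that is not in
                                                # clean_set also provides this soname, the
                                                # consumer is not left broken, so it is skipped.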
14536                                                 if len(providers) > 1:
14537                                                         for provider in providers:
14538                                                                 provider_set = file_owners.get(provider)
14539                                                                 if provider_set is not None:
14540                                                                         provider_dblinks.update(provider_set)
14541
14542                                                 if len(provider_dblinks) > 1:
14543                                                         for provider_dblink in provider_dblinks:
14544                                                                 pkg_key = ("installed", myroot,
14545                                                                         provider_dblink.mycpv, "nomerge")
14546                                                                 if pkg_key not in clean_set:
14547                                                                         provider_pkgs.add(vardb.get(pkg_key))
14548
14549                                                 if provider_pkgs:
14550                                                         continue
14551
14552                                                 if owner_set is not None:
14553                                                         lib_consumers.update(owner_set)
14554
14555                                         for consumer_dblink in list(lib_consumers):
14556                                                 if ("installed", myroot, consumer_dblink.mycpv,
14557                                                         "nomerge") in clean_set:
14558                                                         lib_consumers.remove(consumer_dblink)
14559                                                         continue
14560
14561                                         if lib_consumers:
14562                                                 consumers[lib] = lib_consumers
14563                                         else:
14564                                                 del consumers[lib]
14565                                 if not consumers:
14566                                         del consumer_map[pkg]
14567
14568                 if consumer_map:
14569                         # TODO: Implement a package set for rebuilding consumer packages.
14570
14571                         msg = "In order to avoid breakage of link level " + \
14572                                 "dependencies, one or more packages will not be removed. " + \
14573                                 "This can be solved by rebuilding " + \
14574                                 "the packages that pulled them in."
14575
14576                         prefix = bad(" * ")
14577                         from textwrap import wrap
14578                         writemsg_level("".join(prefix + "%s\n" % line for \
14579                                 line in wrap(msg, 70)), level=logging.WARNING, noiselevel=-1)
14580
14581                         msg = []
14582                         for pkg, consumers in consumer_map.iteritems():
14583                                 unique_consumers = set(chain(*consumers.values()))
14584                                 unique_consumers = sorted(consumer.mycpv \
14585                                         for consumer in unique_consumers)
14586                                 msg.append("")
14587                                 msg.append("  %s pulled in by:" % (pkg.cpv,))
14588                                 for consumer in unique_consumers:
14589                                         msg.append("    %s" % (consumer,))
14590                         msg.append("")
14591                         writemsg_level("".join(prefix + "%s\n" % line for line in msg),
14592                                 level=logging.WARNING, noiselevel=-1)
14593
14594                         # Add lib providers to the graph as children of lib consumers,
14595                         # and also add any dependencies pulled in by the provider.
14596                         writemsg_level(">>> Adding lib providers to graph...\n")
14597
14598                         for pkg, consumers in consumer_map.iteritems():
14599                                 for consumer_dblink in set(chain(*consumers.values())):
14600                                         consumer_pkg = vardb.get(("installed", myroot,
14601                                                 consumer_dblink.mycpv, "nomerge"))
14602                                         if not resolver._add_pkg(pkg,
14603                                                 Dependency(parent=consumer_pkg,
14604                                                 priority=UnmergeDepPriority(runtime=True),
14605                                                 root=pkg.root)):
14606                                                 resolver.display_problems()
14607                                                 return 1
14608
14609                         writemsg_level("\nCalculating dependencies  ")
14610                         success = resolver._complete_graph()
14611                         writemsg_level("\b\b... done!\n")
14612                         resolver.display_problems()
14613                         if not success:
14614                                 return 1
14615                         if unresolved_deps():
14616                                 return 1
14617
14618                         graph = resolver.digraph.copy()
14619                         required_pkgs_total = 0
14620                         for node in graph:
14621                                 if isinstance(node, Package):
14622                                         required_pkgs_total += 1
14623                         cleanlist = create_cleanlist()
14624                         if not cleanlist:
14625                                 return 0
14626                         clean_set = set(cleanlist)
14627
14628                 # Use a topological sort to create an unmerge order such that
14629                 # each package is unmerged before its dependencies. This is
14630                 # necessary to avoid breaking things that may need to run
14631                 # during pkg_prerm or pkg_postrm phases.
14632
14633                 # Create a new graph to account for dependencies between the
14634                 # packages being unmerged.
14635                 graph = digraph()
14636                 del cleanlist[:]
14637
14638                 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
14639                 runtime = UnmergeDepPriority(runtime=True)
14640                 runtime_post = UnmergeDepPriority(runtime_post=True)
14641                 buildtime = UnmergeDepPriority(buildtime=True)
14642                 priority_map = {
14643                         "RDEPEND": runtime,
14644                         "PDEPEND": runtime_post,
14645                         "DEPEND": buildtime,
14646                 }
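                # In the loop below each installed dependency is added as a
                # child of the package that depends on it, so packages that
                # nothing else in clean_set depends on become root nodes of
                # the graph and are popped (unmerged) first.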
14647
14648                 for node in clean_set:
14649                         graph.add(node, None)
14650                         mydeps = []
14651                         node_use = node.metadata["USE"].split()
14652                         for dep_type in dep_keys:
14653                                 depstr = node.metadata[dep_type]
14654                                 if not depstr:
14655                                         continue
14656                                 try:
14657                                         portage.dep._dep_check_strict = False
14658                                         success, atoms = portage.dep_check(depstr, None, settings,
14659                                                 myuse=node_use, trees=resolver._graph_trees,
14660                                                 myroot=myroot)
14661                                 finally:
14662                                         portage.dep._dep_check_strict = True
14663                                 if not success:
14664                                         # Ignore invalid deps of packages that will
14665                                         # be uninstalled anyway.
14666                                         continue
14667
14668                                 priority = priority_map[dep_type]
14669                                 for atom in atoms:
14670                                         if not isinstance(atom, portage.dep.Atom):
14671                                                 # Ignore invalid atoms returned from dep_check().
14672                                                 continue
14673                                         if atom.blocker:
14674                                                 continue
14675                                         matches = vardb.match_pkgs(atom)
14676                                         if not matches:
14677                                                 continue
14678                                         for child_node in matches:
14679                                                 if child_node in clean_set:
14680                                                         graph.add(child_node, node, priority=priority)
14681
14682                 ordered = True
14683                 if len(graph.order) == len(graph.root_nodes()):
14684                         # If there are no dependencies between packages
14685                         # let unmerge() group them by cat/pn.
14686                         ordered = False
14687                         cleanlist = [pkg.cpv for pkg in graph.order]
14688                 else:
14689                         # Order nodes from lowest to highest overall reference count for
14690                         # optimal root node selection.
14691                         node_refcounts = {}
14692                         for node in graph.order:
14693                                 node_refcounts[node] = len(graph.parent_nodes(node))
14694                         def cmp_reference_count(node1, node2):
14695                                 return node_refcounts[node1] - node_refcounts[node2]
14696                         graph.order.sort(key=cmp_sort_key(cmp_reference_count))
14697
14698                         ignore_priority_range = [None]
14699                         ignore_priority_range.extend(
14700                                 xrange(UnmergeDepPriority.MIN, UnmergeDepPriority.MAX + 1))
14701                         while not graph.empty():
14702                                 for ignore_priority in ignore_priority_range:
14703                                         nodes = graph.root_nodes(ignore_priority=ignore_priority)
14704                                         if nodes:
14705                                                 break
14706                                 if not nodes:
14707                                         raise AssertionError("no root nodes")
14708                                 if ignore_priority is not None:
14709                                         # Some deps have been dropped due to circular dependencies,
14710                                         # so only pop one node in order to minimize the number of
14711                                         # deps that are dropped.
14712                                         del nodes[1:]
14713                                 for node in nodes:
14714                                         graph.remove(node)
14715                                         cleanlist.append(node.cpv)
14716
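                # cleanlist is now ordered so that reverse dependencies come
                # first; e.g. with hypothetical packages where app-misc/foo
                # depends on dev-libs/bar, the order is:
                #   app-misc/foo-1.0
                #   dev-libs/bar-2.0
                # so that bar is still installed while foo's pkg_prerm and
                # pkg_postrm phases run.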
14717                 unmerge(root_config, myopts, "unmerge", cleanlist,
14718                         ldpath_mtimes, ordered=ordered)
14719
14720         if action == "prune":
14721                 return
14722
14723         if not cleanlist and "--quiet" in myopts:
14724                 return
14725
14726         print "Packages installed:   "+str(len(vardb.cpv_all()))
14727         print "Packages in world:    " + \
14728                 str(len(root_config.sets["world"].getAtoms()))
14729         print "Packages in system:   " + \
14730                 str(len(root_config.sets["system"].getAtoms()))
14731         print "Required packages:    "+str(required_pkgs_total)
14732         if "--pretend" in myopts:
14733                 print "Number to remove:     "+str(len(cleanlist))
14734         else:
14735                 print "Number removed:       "+str(len(cleanlist))
14736
14737 def resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
14738         """
14739         Construct a depgraph for the given resume list. This will raise
14740         PackageNotFound or depgraph.UnsatisfiedResumeDep when necessary.
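
        Sketch of a typical call (mirroring the use in action_build() below):

                success, mydepgraph, dropped_tasks = resume_depgraph(
                        settings, trees, mtimedb, myopts, myparams, spinner)
                if not success:
                        mydepgraph.display_problems()
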
14741         @rtype: tuple
14742         @returns: (success, depgraph, dropped_tasks)
14743         """
14744         skip_masked = True
14745         skip_unsatisfied = True
14746         mergelist = mtimedb["resume"]["mergelist"]
14747         dropped_tasks = set()
14748         while True:
14749                 mydepgraph = depgraph(settings, trees,
14750                         myopts, myparams, spinner)
14751                 try:
14752                         success = mydepgraph.loadResumeCommand(mtimedb["resume"],
14753                                 skip_masked=skip_masked)
14754                 except depgraph.UnsatisfiedResumeDep, e:
14755                         if not skip_unsatisfied:
14756                                 raise
14757
14758                         graph = mydepgraph.digraph
14759                         unsatisfied_parents = dict((dep.parent, dep.parent) \
14760                                 for dep in e.value)
14761                         traversed_nodes = set()
14762                         unsatisfied_stack = list(unsatisfied_parents)
14763                         while unsatisfied_stack:
14764                                 pkg = unsatisfied_stack.pop()
14765                                 if pkg in traversed_nodes:
14766                                         continue
14767                                 traversed_nodes.add(pkg)
14768
14769                                 # If this package was pulled in by a parent
14770                                 # package scheduled for merge, removing this
14771                                 # package may cause the parent package's
14772                                 # dependency to become unsatisfied.
14773                                 for parent_node in graph.parent_nodes(pkg):
14774                                         if not isinstance(parent_node, Package) \
14775                                                 or parent_node.operation not in ("merge", "nomerge"):
14776                                                 continue
14777                                         unsatisfied = \
14778                                                 graph.child_nodes(parent_node,
14779                                                 ignore_priority=DepPrioritySatisfiedRange.ignore_soft)
14780                                         if pkg in unsatisfied:
14781                                                 unsatisfied_parents[parent_node] = parent_node
14782                                                 unsatisfied_stack.append(parent_node)
14783
14784                         pruned_mergelist = []
14785                         for x in mergelist:
14786                                 if isinstance(x, list) and \
14787                                         tuple(x) not in unsatisfied_parents:
14788                                         pruned_mergelist.append(x)
14789
14790                         # If the mergelist doesn't shrink then this loop is infinite.
14791                         if len(pruned_mergelist) == len(mergelist):
14792                                 # This happens if a package can't be dropped because
14793                                 # it's already installed, but it has unsatisfied PDEPEND.
14794                                 raise
14795                         mergelist[:] = pruned_mergelist
14796
14797                         # Exclude installed packages that have been removed from the graph due
14798                         # to failure to build/install runtime dependencies after the dependent
14799                         # package has already been installed.
14800                         dropped_tasks.update(pkg for pkg in \
14801                                 unsatisfied_parents if pkg.operation != "nomerge")
14802                         mydepgraph.break_refs(unsatisfied_parents)
14803
14804                         del e, graph, traversed_nodes, \
14805                                 unsatisfied_parents, unsatisfied_stack
14806                         continue
14807                 else:
14808                         break
14809         return (success, mydepgraph, dropped_tasks)
14810
14811 def action_build(settings, trees, mtimedb,
14812         myopts, myaction, myfiles, spinner):
14813
14814         # validate the state of the resume data
14815         # so that we can make assumptions later.
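        # A well-formed entry looks roughly like this (values illustrative):
        #   mtimedb[k] = {
        #       "myopts"    : {"--deep": True, ...}  (or a plain list of options),
        #       "favorites" : ["app-misc/foo", ...],
        #       "mergelist" : [[pkg_type, pkg_root, pkg_key, pkg_action], ...],
        #   }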
14816         for k in ("resume", "resume_backup"):
14817                 if k not in mtimedb:
14818                         continue
14819                 resume_data = mtimedb[k]
14820                 if not isinstance(resume_data, dict):
14821                         del mtimedb[k]
14822                         continue
14823                 mergelist = resume_data.get("mergelist")
14824                 if not isinstance(mergelist, list):
14825                         del mtimedb[k]
14826                         continue
14827                 for x in mergelist:
14828                         if not (isinstance(x, list) and len(x) == 4):
14829                                 continue
14830                         pkg_type, pkg_root, pkg_key, pkg_action = x
14831                         if pkg_root not in trees:
14832                                 # Current $ROOT setting differs,
14833                                 # so the list must be stale.
14834                                 mergelist = None
14835                                 break
14836                 if not mergelist:
14837                         del mtimedb[k]
14838                         continue
14839                 resume_opts = resume_data.get("myopts")
14840                 if not isinstance(resume_opts, (dict, list)):
14841                         del mtimedb[k]
14842                         continue
14843                 favorites = resume_data.get("favorites")
14844                 if not isinstance(favorites, list):
14845                         del mtimedb[k]
14846                         continue
14847
14848         resume = False
14849         if "--resume" in myopts and \
14850                 ("resume" in mtimedb or
14851                 "resume_backup" in mtimedb):
14852                 resume = True
14853                 if "resume" not in mtimedb:
14854                         mtimedb["resume"] = mtimedb["resume_backup"]
14855                         del mtimedb["resume_backup"]
14856                         mtimedb.commit()
14857                 # "myopts" is a list for backward compatibility.
14858                 resume_opts = mtimedb["resume"].get("myopts", [])
14859                 if isinstance(resume_opts, list):
14860                         resume_opts = dict((k,True) for k in resume_opts)
14861                 for opt in ("--ask", "--color", "--skipfirst", "--tree"):
14862                         resume_opts.pop(opt, None)
14863
14864                 # Current options always override resume_opts.
14865                 resume_opts.update(myopts)
14866                 myopts.clear()
14867                 myopts.update(resume_opts)
14868
14869                 if "--debug" in myopts:
14870                         writemsg_level("myopts %s\n" % (myopts,))
14871
14872                 # Adjust config according to options of the command being resumed.
14873                 for myroot in trees:
14874                         mysettings = trees[myroot]["vartree"].settings
14875                         mysettings.unlock()
14876                         adjust_config(myopts, mysettings)
14877                         mysettings.lock()
14878                         del myroot, mysettings
14879
14880         ldpath_mtimes = mtimedb["ldpath"]
14881         favorites=[]
14882         merge_count = 0
14883         buildpkgonly = "--buildpkgonly" in myopts
14884         pretend = "--pretend" in myopts
14885         fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
14886         ask = "--ask" in myopts
14887         nodeps = "--nodeps" in myopts
14888         oneshot = "--oneshot" in myopts or "--onlydeps" in myopts
14889         tree = "--tree" in myopts
14890         if nodeps and tree:
14891                 tree = False
14892                 del myopts["--tree"]
14893                 portage.writemsg(colorize("WARN", " * ") + \
14894                         "--tree is broken with --nodeps. Disabling...\n")
14895         debug = "--debug" in myopts
14896         verbose = "--verbose" in myopts
14897         quiet = "--quiet" in myopts
14898         if pretend or fetchonly:
14899                 # make the mtimedb readonly
14900                 mtimedb.filename = None
14901         if '--digest' in myopts or 'digest' in settings.features:
14902                 if '--digest' in myopts:
14903                         msg = "The --digest option"
14904                 else:
14905                         msg = "The FEATURES=digest setting"
14906
14907                 msg += " can prevent corruption from being" + \
14908                         " noticed. The `repoman manifest` command is the preferred" + \
14909                         " way to generate manifests and it is capable of doing an" + \
14910                         " entire repository or category at once."
14911                 prefix = bad(" * ")
14912                 writemsg(prefix + "\n")
14913                 from textwrap import wrap
14914                 for line in wrap(msg, 72):
14915                         writemsg("%s%s\n" % (prefix, line))
14916                 writemsg(prefix + "\n")
14917
14918         if "--quiet" not in myopts and \
14919                 ("--pretend" in myopts or "--ask" in myopts or \
14920                 "--tree" in myopts or "--verbose" in myopts):
14921                 action = ""
14922                 if "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
14923                         action = "fetched"
14924                 elif "--buildpkgonly" in myopts:
14925                         action = "built"
14926                 else:
14927                         action = "merged"
14928                 if "--tree" in myopts and action != "fetched": # Tree doesn't work with fetching
14929                         print
14930                         print darkgreen("These are the packages that would be %s, in reverse order:") % action
14931                         print
14932                 else:
14933                         print
14934                         print darkgreen("These are the packages that would be %s, in order:") % action
14935                         print
14936
14937         show_spinner = "--quiet" not in myopts and "--nodeps" not in myopts
14938         if not show_spinner:
14939                 spinner.update = spinner.update_quiet
14940
14941         if resume:
14942                 favorites = mtimedb["resume"].get("favorites")
14943                 if not isinstance(favorites, list):
14944                         favorites = []
14945
14946                 if show_spinner:
14947                         print "Calculating dependencies  ",
14948                 myparams = create_depgraph_params(myopts, myaction)
14949
14950                 resume_data = mtimedb["resume"]
14951                 mergelist = resume_data["mergelist"]
14952                 if mergelist and "--skipfirst" in myopts:
14953                         for i, task in enumerate(mergelist):
14954                                 if isinstance(task, list) and \
14955                                         task and task[-1] == "merge":
14956                                         del mergelist[i]
14957                                         break
14958
14959                 success = False
14960                 mydepgraph = None
14961                 try:
14962                         success, mydepgraph, dropped_tasks = resume_depgraph(
14963                                 settings, trees, mtimedb, myopts, myparams, spinner)
14964                 except (portage.exception.PackageNotFound,
14965                         depgraph.UnsatisfiedResumeDep), e:
14966                         if isinstance(e, depgraph.UnsatisfiedResumeDep):
14967                                 mydepgraph = e.depgraph
14968                         if show_spinner:
14969                                 print
14970                         from textwrap import wrap
14971                         from portage.output import EOutput
14972                         out = EOutput()
14973
14974                         resume_data = mtimedb["resume"]
14975                         mergelist = resume_data.get("mergelist")
14976                         if not isinstance(mergelist, list):
14977                                 mergelist = []
14978                         if mergelist and (debug or (verbose and not quiet)):
14979                                 out.eerror("Invalid resume list:")
14980                                 out.eerror("")
14981                                 indent = "  "
14982                                 for task in mergelist:
14983                                         if isinstance(task, list):
14984                                                 out.eerror(indent + str(tuple(task)))
14985                                 out.eerror("")
14986
14987                         if isinstance(e, depgraph.UnsatisfiedResumeDep):
14988                                 out.eerror("One or more packages are either masked or " + \
14989                                         "have missing dependencies:")
14990                                 out.eerror("")
14991                                 indent = "  "
14992                                 for dep in e.value:
14993                                         if dep.atom is None:
14994                                                 out.eerror(indent + "Masked package:")
14995                                                 out.eerror(2 * indent + str(dep.parent))
14996                                                 out.eerror("")
14997                                         else:
14998                                                 out.eerror(indent + str(dep.atom) + " pulled in by:")
14999                                                 out.eerror(2 * indent + str(dep.parent))
15000                                                 out.eerror("")
15001                                 msg = "The resume list contains packages " + \
15002                                         "that are either masked or have " + \
15003                                         "unsatisfied dependencies. " + \
15004                                         "Please restart/continue " + \
15005                                         "the operation manually, or use --skipfirst " + \
15006                                         "to skip the first package in the list and " + \
15007                                         "any other packages that may be " + \
15008                                         "masked or have missing dependencies."
15009                                 for line in wrap(msg, 72):
15010                                         out.eerror(line)
15011                         elif isinstance(e, portage.exception.PackageNotFound):
15012                                 out.eerror("An expected package is " + \
15013                                         "not available: %s" % str(e))
15014                                 out.eerror("")
15015                                 msg = "The resume list contains one or more " + \
15016                                         "packages that are no longer " + \
15017                                         "available. Please restart/continue " + \
15018                                         "the operation manually."
15019                                 for line in wrap(msg, 72):
15020                                         out.eerror(line)
15021                 else:
15022                         if show_spinner:
15023                                 print "\b\b... done!"
15024
15025                 if success:
15026                         if dropped_tasks:
15027                                 portage.writemsg("!!! One or more packages have been " + \
15028                                         "dropped due to\n" + \
15029                                         "!!! masking or unsatisfied dependencies:\n\n",
15030                                         noiselevel=-1)
15031                                 for task in dropped_tasks:
15032                                         portage.writemsg("  " + str(task) + "\n", noiselevel=-1)
15033                                 portage.writemsg("\n", noiselevel=-1)
15034                         del dropped_tasks
15035                 else:
15036                         if mydepgraph is not None:
15037                                 mydepgraph.display_problems()
15038                         if not (ask or pretend):
15039                                 # delete the current list and also the backup
15040                                 # since it's probably stale too.
15041                                 for k in ("resume", "resume_backup"):
15042                                         mtimedb.pop(k, None)
15043                                 mtimedb.commit()
15044
15045                         return 1
15046         else:
15047                 if ("--resume" in myopts):
15048                         print darkgreen("emerge: It seems we have nothing to resume...")
15049                         return os.EX_OK
15050
15051                 myparams = create_depgraph_params(myopts, myaction)
15052                 if "--quiet" not in myopts and "--nodeps" not in myopts:
15053                         print "Calculating dependencies  ",
15054                         sys.stdout.flush()
15055                 mydepgraph = depgraph(settings, trees, myopts, myparams, spinner)
15056                 try:
15057                         retval, favorites = mydepgraph.select_files(myfiles)
15058                 except portage.exception.PackageNotFound, e:
15059                         portage.writemsg("\n!!! %s\n" % str(e), noiselevel=-1)
15060                         return 1
15061                 except portage.exception.PackageSetNotFound, e:
15062                         root_config = trees[settings["ROOT"]]["root_config"]
15063                         display_missing_pkg_set(root_config, e.value)
15064                         return 1
15065                 if show_spinner:
15066                         print "\b\b... done!"
15067                 if not retval:
15068                         mydepgraph.display_problems()
15069                         return 1
15070
15071         if "--pretend" not in myopts and \
15072                 ("--ask" in myopts or "--tree" in myopts or \
15073                 "--verbose" in myopts) and \
15074                 not ("--quiet" in myopts and "--ask" not in myopts):
15075                 if "--resume" in myopts:
15076                         mymergelist = mydepgraph.altlist()
15077                         if len(mymergelist) == 0:
15078                                 print colorize("INFORM", "emerge: It seems we have nothing to resume...")
15079                                 return os.EX_OK
15080                         favorites = mtimedb["resume"]["favorites"]
15081                         retval = mydepgraph.display(
15082                                 mydepgraph.altlist(reversed=tree),
15083                                 favorites=favorites)
15084                         mydepgraph.display_problems()
15085                         if retval != os.EX_OK:
15086                                 return retval
15087                         prompt="Would you like to resume merging these packages?"
15088                 else:
15089                         retval = mydepgraph.display(
15090                                 mydepgraph.altlist(reversed=("--tree" in myopts)),
15091                                 favorites=favorites)
15092                         mydepgraph.display_problems()
15093                         if retval != os.EX_OK:
15094                                 return retval
15095                         mergecount=0
15096                         for x in mydepgraph.altlist():
15097                                 if isinstance(x, Package) and x.operation == "merge":
15098                                         mergecount += 1
15099
15100                         if mergecount==0:
15101                                 sets = trees[settings["ROOT"]]["root_config"].sets
15102                                 world_candidates = None
15103                                 if "--noreplace" in myopts and \
15104                                         not oneshot and favorites:
15105                                         # Sets that are not world candidates are filtered
15106                                         # out here since the favorites list needs to be
15107                                         # complete for depgraph.loadResumeCommand() to
15108                                         # operate correctly.
15109                                         world_candidates = [x for x in favorites \
15110                                                 if not (x.startswith(SETPREFIX) and \
15111                                                 not sets[x[1:]].world_candidate)]
15112                                 if "--noreplace" in myopts and \
15113                                         not oneshot and world_candidates:
15114                                         print
15115                                         for x in world_candidates:
15116                                                 print " %s %s" % (good("*"), x)
15117                                         prompt="Would you like to add these packages to your world favorites?"
15118                                 elif settings["AUTOCLEAN"] and "yes"==settings["AUTOCLEAN"]:
15119                                         prompt="Nothing to merge; would you like to auto-clean packages?"
15120                                 else:
15121                                         print
15122                                         print "Nothing to merge; quitting."
15123                                         print
15124                                         return os.EX_OK
15125                         elif "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
15126                                 prompt="Would you like to fetch the source files for these packages?"
15127                         else:
15128                                 prompt="Would you like to merge these packages?"
15129                 print
15130                 if "--ask" in myopts and userquery(prompt) == "No":
15131                         print
15132                         print "Quitting."
15133                         print
15134                         return os.EX_OK
15135                 # Don't ask again (e.g. when auto-cleaning packages after merge)
15136                 myopts.pop("--ask", None)
15137
15138         if ("--pretend" in myopts) and not ("--fetchonly" in myopts or "--fetch-all-uri" in myopts):
15139                 if ("--resume" in myopts):
15140                         mymergelist = mydepgraph.altlist()
15141                         if len(mymergelist) == 0:
15142                                 print colorize("INFORM", "emerge: It seems we have nothing to resume...")
15143                                 return os.EX_OK
15144                         favorites = mtimedb["resume"]["favorites"]
15145                         retval = mydepgraph.display(
15146                                 mydepgraph.altlist(reversed=tree),
15147                                 favorites=favorites)
15148                         mydepgraph.display_problems()
15149                         if retval != os.EX_OK:
15150                                 return retval
15151                 else:
15152                         retval = mydepgraph.display(
15153                                 mydepgraph.altlist(reversed=("--tree" in myopts)),
15154                                 favorites=favorites)
15155                         mydepgraph.display_problems()
15156                         if retval != os.EX_OK:
15157                                 return retval
15158                         if "--buildpkgonly" in myopts:
15159                                 graph_copy = mydepgraph.digraph.clone()
15160                                 removed_nodes = set()
15161                                 for node in graph_copy:
15162                                         if not isinstance(node, Package) or \
15163                                                 node.operation == "nomerge":
15164                                                 removed_nodes.add(node)
15165                                 graph_copy.difference_update(removed_nodes)
15166                                 if not graph_copy.hasallzeros(ignore_priority = \
15167                                         DepPrioritySatisfiedRange.ignore_medium):
15168                                         print "\n!!! --buildpkgonly requires all dependencies to be merged."
15169                                         print "!!! You have to merge the dependencies before you can build this package.\n"
15170                                         return 1
15171         else:
15172                 if "--buildpkgonly" in myopts:
15173                         graph_copy = mydepgraph.digraph.clone()
15174                         removed_nodes = set()
15175                         for node in graph_copy:
15176                                 if not isinstance(node, Package) or \
15177                                         node.operation == "nomerge":
15178                                         removed_nodes.add(node)
15179                         graph_copy.difference_update(removed_nodes)
15180                         if not graph_copy.hasallzeros(ignore_priority = \
15181                                 DepPrioritySatisfiedRange.ignore_medium):
15182                                 print "\n!!! --buildpkgonly requires all dependencies to be merged."
15183                                 print "!!! Cannot merge requested packages. Merge deps and try again.\n"
15184                                 return 1
15185
15186                 if ("--resume" in myopts):
15187                         favorites=mtimedb["resume"]["favorites"]
15188                         mymergelist = mydepgraph.altlist()
15189                         mydepgraph.break_refs(mymergelist)
15190                         mergetask = Scheduler(settings, trees, mtimedb, myopts,
15191                                 spinner, mymergelist, favorites, mydepgraph.schedulerGraph())
15192                         del mydepgraph, mymergelist
15193                         clear_caches(trees)
15194
15195                         retval = mergetask.merge()
15196                         merge_count = mergetask.curval
15197                 else:
15198                         if "resume" in mtimedb and \
15199                         "mergelist" in mtimedb["resume"] and \
15200                         len(mtimedb["resume"]["mergelist"]) > 1:
15201                                 mtimedb["resume_backup"] = mtimedb["resume"]
15202                                 del mtimedb["resume"]
15203                                 mtimedb.commit()
15204                         mtimedb["resume"]={}
15205                         # Stored as a dict starting with portage-2.1.6_rc1, and supported
15206                         # by >=portage-2.1.3_rc8. Versions <portage-2.1.3_rc8 only support
15207                         # a list type for options.
15208                         mtimedb["resume"]["myopts"] = myopts.copy()
15209
15210                         # Convert Atom instances to plain str.
15211                         mtimedb["resume"]["favorites"] = [str(x) for x in favorites]
15212
15213                         pkglist = mydepgraph.altlist()
15214                         mydepgraph.saveNomergeFavorites()
15215                         mydepgraph.break_refs(pkglist)
15216                         mergetask = Scheduler(settings, trees, mtimedb, myopts,
15217                                 spinner, pkglist, favorites, mydepgraph.schedulerGraph())
15218                         del mydepgraph, pkglist
15219                         clear_caches(trees)
15220
15221                         retval = mergetask.merge()
15222                         merge_count = mergetask.curval
15223
15224                 if retval == os.EX_OK and not (buildpkgonly or fetchonly or pretend):
15225                         if "yes" == settings.get("AUTOCLEAN"):
15226                                 portage.writemsg_stdout(">>> Auto-cleaning packages...\n")
15227                                 unmerge(trees[settings["ROOT"]]["root_config"],
15228                                         myopts, "clean", [],
15229                                         ldpath_mtimes, autoclean=1)
15230                         else:
15231                                 portage.writemsg_stdout(colorize("WARN", "WARNING:")
15232                                         + " AUTOCLEAN is disabled.  This can cause serious"
15233                                         + " problems due to overlapping packages.\n")
15234                         trees[settings["ROOT"]]["vartree"].dbapi.plib_registry.pruneNonExisting()
15235
15236                 return retval
15237
15238 def multiple_actions(action1, action2):
15239         sys.stderr.write("\n!!! Multiple actions requested... Please choose one only.\n")
15240         sys.stderr.write("!!! '%s' or '%s'\n\n" % (action1, action2))
15241         sys.exit(1)
15242
15243 def insert_optional_args(args):
15244         """
15245         Parse optional arguments and insert a value if one has
15246         not been provided. This is done before feeding the args
15247         to the optparse parser since that parser does not support
15248         this feature natively.
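
        For example (given the option tables defined below):

        >>> insert_optional_args(["--deselect", "-j4", "world"])
        ['--deselect', 'True', '--jobs', '4', 'world']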
15249         """
15250
15251         new_args = []
15252         jobs_opts = ("-j", "--jobs")
15253         default_arg_opts = {
15254                 '--deselect'   : ('n',),
15255                 '--root-deps'  : ('rdeps',),
15256         }
15257         arg_stack = args[:]
15258         arg_stack.reverse()
15259         while arg_stack:
15260                 arg = arg_stack.pop()
15261
15262                 default_arg_choices = default_arg_opts.get(arg)
15263                 if default_arg_choices is not None:
15264                         new_args.append(arg)
15265                         if arg_stack and arg_stack[-1] in default_arg_choices:
15266                                 new_args.append(arg_stack.pop())
15267                         else:
15268                                 # insert default argument
15269                                 new_args.append('True')
15270                         continue
15271
15272                 short_job_opt = bool("j" in arg and arg[:1] == "-" and arg[:2] != "--")
15273                 if not (short_job_opt or arg in jobs_opts):
15274                         new_args.append(arg)
15275                         continue
15276
15277                 # Insert an empty placeholder in order to
15278                 # satisfy the requirements of optparse.
15279
15280                 new_args.append("--jobs")
15281                 job_count = None
15282                 saved_opts = None
15283                 if short_job_opt and len(arg) > 2:
15284                         if arg[:2] == "-j":
15285                                 try:
15286                                         job_count = int(arg[2:])
15287                                 except ValueError:
15288                                         saved_opts = arg[2:]
15289                         else:
15290                                 job_count = "True"
15291                                 saved_opts = arg[1:].replace("j", "")
15292
15293                 if job_count is None and arg_stack:
15294                         try:
15295                                 job_count = int(arg_stack[-1])
15296                         except ValueError:
15297                                 pass
15298                         else:
15299                                 # Discard the job count from the stack
15300                                 # since we're consuming it here.
15301                                 arg_stack.pop()
15302
15303                 if job_count is None:
15304                         # unlimited number of jobs
15305                         new_args.append("True")
15306                 else:
15307                         new_args.append(str(job_count))
15308
15309                 if saved_opts is not None:
15310                         new_args.append("-" + saved_opts)
15311
15312         return new_args
15313
15314 def parse_opts(tmpcmdline, silent=False):
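        """
        Parse the emerge command line and return a 3-tuple of
        (action, options dict, remaining file/atom arguments). Invalid
        --jobs and --load-average values are reported (unless silent=True)
        and discarded.
        """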
15315         myaction=None
15316         myopts = {}
15317         myfiles=[]
15318
15319         global actions, options, shortmapping
15320
15321         longopt_aliases = {"--cols":"--columns", "--skip-first":"--skipfirst"}
15322         argument_options = {
15323                 "--config-root": {
15324                         "help":"specify the location for portage configuration files",
15325                         "action":"store"
15326                 },
15327                 "--color": {
15328                         "help":"enable or disable color output",
15329                         "type":"choice",
15330                         "choices":("y", "n")
15331                 },
15332
15333                 "--deselect": {
15334                         "help"    : "remove atoms from the world file",
15335                         "type"    : "choice",
15336                         "choices" : ("True", "n")
15337                 },
15338
15339                 "--jobs": {
15340
15341                         "help"   : "Specifies the number of packages to build " + \
15342                                 "simultaneously.",
15343
15344                         "action" : "store"
15345                 },
15346
15347                 "--load-average": {
15348
15349                         "help"   :"Specifies that no new builds should be started " + \
15350                                 "if there are other builds running and the load average " + \
15351                                 "is at least LOAD (a floating-point number).",
15352
15353                         "action" : "store"
15354                 },
15355
15356                 "--with-bdeps": {
15357                         "help":"include unnecessary build time dependencies",
15358                         "type":"choice",
15359                         "choices":("y", "n")
15360                 },
15361                 "--reinstall": {
15362                         "help":"specify conditions to trigger package reinstallation",
15363                         "type":"choice",
15364                         "choices":["changed-use"]
15365                 },
15366                 "--root": {
15367                         "help"   : "specify the target root filesystem for merging packages",
15368                         "action" : "store"
15369                 },
15370
15371                 "--root-deps": {
15372                         "help"    : "modify interpretation of dependencies",
15373                         "type"    : "choice",
15374                         "choices" : ("True", "rdeps")
15375                 },
15376         }
15377
15378         from optparse import OptionParser
15379         parser = OptionParser()
15380         if parser.has_option("--help"):
15381                 parser.remove_option("--help")
15382
15383         for action_opt in actions:
15384                 parser.add_option("--" + action_opt, action="store_true",
15385                         dest=action_opt.replace("-", "_"), default=False)
15386         for myopt in options:
15387                 parser.add_option(myopt, action="store_true",
15388                         dest=myopt.lstrip("--").replace("-", "_"), default=False)
15389         for shortopt, longopt in shortmapping.iteritems():
15390                 parser.add_option("-" + shortopt, action="store_true",
15391                         dest=longopt.lstrip("--").replace("-", "_"), default=False)
15392         for myalias, myopt in longopt_aliases.iteritems():
15393                 parser.add_option(myalias, action="store_true",
15394                         dest=myopt.lstrip("--").replace("-", "_"), default=False)
15395
15396         for myopt, kwargs in argument_options.iteritems():
15397                 parser.add_option(myopt,
15398                         dest=myopt.lstrip("--").replace("-", "_"), **kwargs)
15399
15400         tmpcmdline = insert_optional_args(tmpcmdline)
15401
15402         myoptions, myargs = parser.parse_args(args=tmpcmdline)
15403
15404         if myoptions.deselect == "True":
15405                 myoptions.deselect = True
15406
15407         if myoptions.root_deps == "True":
15408                 myoptions.root_deps = True
15409
15410         if myoptions.jobs:
15411                 jobs = None
15412                 if myoptions.jobs == "True":
15413                         jobs = True
15414                 else:
15415                         try:
15416                                 jobs = int(myoptions.jobs)
15417                         except ValueError:
15418                                 jobs = -1
15419
15420                 if jobs is not True and \
15421                         jobs < 1:
15422                         jobs = None
15423                         if not silent:
15424                                 writemsg("!!! Invalid --jobs parameter: '%s'\n" % \
15425                                         (myoptions.jobs,), noiselevel=-1)
15426
15427                 myoptions.jobs = jobs
15428
15429         if myoptions.load_average:
15430                 try:
15431                         load_average = float(myoptions.load_average)
15432                 except ValueError:
15433                         load_average = 0.0
15434
15435                 if load_average <= 0.0:
15436                         load_average = None
15437                         if not silent:
15438                                 writemsg("!!! Invalid --load-average parameter: '%s'\n" % \
15439                                         (myoptions.load_average,), noiselevel=-1)
15440
15441                 myoptions.load_average = load_average
15442
15443         for myopt in options:
15444                 v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"))
15445                 if v:
15446                         myopts[myopt] = True
15447
15448         for myopt in argument_options:
15449                 v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"), None)
15450                 if v is not None:
15451                         myopts[myopt] = v
15452
15453         if myoptions.searchdesc:
15454                 myoptions.search = True
15455
15456         for action_opt in actions:
15457                 v = getattr(myoptions, action_opt.replace("-", "_"))
15458                 if v:
15459                         if myaction:
15460                                 multiple_actions(myaction, action_opt)
15461                                 sys.exit(1)
15462                         myaction = action_opt
15463
15464         if myaction is None and myoptions.deselect is True:
15465                 myaction = 'deselect'
15466
15467         myfiles += myargs
15468
15469         return myaction, myopts, myfiles
15470
15471 def validate_ebuild_environment(trees):
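        """Call settings.validate() on the vartree settings of every root."""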
15472         for myroot in trees:
15473                 settings = trees[myroot]["vartree"].settings
15474                 settings.validate()
15475
15476 def clear_caches(trees):
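        """
        Free memory by clearing the dbapi caches of every tree and the
        global portage.dircache, then run a garbage collection pass.
        """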
15477         for d in trees.itervalues():
15478                 d["porttree"].dbapi.melt()
15479                 d["porttree"].dbapi._aux_cache.clear()
15480                 d["bintree"].dbapi._aux_cache.clear()
15481                 d["bintree"].dbapi._clear_cache()
15482                 d["vartree"].dbapi.linkmap._clear_cache()
15483         portage.dircache.clear()
15484         gc.collect()
15485
15486 def load_emerge_config(trees=None):
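        """
        Build the (settings, trees, mtimedb) configuration used by emerge,
        honoring the PORTAGE_CONFIGROOT and ROOT environment variables and
        attaching a RootConfig instance to each root's trees.
        """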
15487         kwargs = {}
15488         for k, envvar in (("config_root", "PORTAGE_CONFIGROOT"), ("target_root", "ROOT")):
15489                 v = os.environ.get(envvar, None)
15490                 if v and v.strip():
15491                         kwargs[k] = v
15492         trees = portage.create_trees(trees=trees, **kwargs)
15493
15494         for root, root_trees in trees.iteritems():
15495                 settings = root_trees["vartree"].settings
15496                 setconfig = load_default_config(settings, root_trees)
15497                 root_trees["root_config"] = RootConfig(settings, root_trees, setconfig)
15498
15499         settings = trees["/"]["vartree"].settings
15500
15501         for myroot in trees:
15502                 if myroot != "/":
15503                         settings = trees[myroot]["vartree"].settings
15504                         break
15505
15506         mtimedbfile = os.path.join("/", portage.CACHE_PATH.lstrip(os.path.sep), "mtimedb")
15507         mtimedb = portage.MtimeDB(mtimedbfile)
15508         
15509         return settings, trees, mtimedb
15510
15511 def adjust_config(myopts, settings):
15512         """Make emerge specific adjustments to the config."""
15513
15514         # To enhance usability, make some vars case insensitive by forcing them to
15515         # lower case.
15516         for myvar in ("AUTOCLEAN", "NOCOLOR"):
15517                 if myvar in settings:
15518                         settings[myvar] = settings[myvar].lower()
15519                         settings.backup_changes(myvar)
15520         del myvar
15521
15522         # Kill noauto as it will break merges otherwise.
15523         if "noauto" in settings.features:
15524                 settings.features.remove('noauto')
15525                 settings['FEATURES'] = ' '.join(sorted(settings.features))
15526                 settings.backup_changes("FEATURES")
15527
15528         CLEAN_DELAY = 5
15529         try:
15530                 CLEAN_DELAY = int(settings.get("CLEAN_DELAY", str(CLEAN_DELAY)))
15531         except ValueError, e:
15532                 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
15533                 portage.writemsg("!!! Unable to parse integer: CLEAN_DELAY='%s'\n" % \
15534                         settings["CLEAN_DELAY"], noiselevel=-1)
15535         settings["CLEAN_DELAY"] = str(CLEAN_DELAY)
15536         settings.backup_changes("CLEAN_DELAY")
15537
15538         EMERGE_WARNING_DELAY = 10
15539         try:
15540                 EMERGE_WARNING_DELAY = int(settings.get(
15541                         "EMERGE_WARNING_DELAY", str(EMERGE_WARNING_DELAY)))
15542         except ValueError, e:
15543                 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
15544                 portage.writemsg("!!! Unable to parse integer: EMERGE_WARNING_DELAY='%s'\n" % \
15545                         settings["EMERGE_WARNING_DELAY"], noiselevel=-1)
15546         settings["EMERGE_WARNING_DELAY"] = str(EMERGE_WARNING_DELAY)
15547         settings.backup_changes("EMERGE_WARNING_DELAY")
15548
15549         if "--quiet" in myopts:
15550                 settings["PORTAGE_QUIET"]="1"
15551                 settings.backup_changes("PORTAGE_QUIET")
15552
15553         if "--verbose" in myopts:
15554                 settings["PORTAGE_VERBOSE"] = "1"
15555                 settings.backup_changes("PORTAGE_VERBOSE")
15556
15557         # Set so that configs will be merged regardless of remembered status
15558         if ("--noconfmem" in myopts):
15559                 settings["NOCONFMEM"]="1"
15560                 settings.backup_changes("NOCONFMEM")
15561
15562         # Set various debug markers... They should be merged somehow.
15563         PORTAGE_DEBUG = 0
15564         try:
15565                 PORTAGE_DEBUG = int(settings.get("PORTAGE_DEBUG", str(PORTAGE_DEBUG)))
15566                 if PORTAGE_DEBUG not in (0, 1):
15567                         portage.writemsg("!!! Invalid value: PORTAGE_DEBUG='%i'\n" % \
15568                                 PORTAGE_DEBUG, noiselevel=-1)
15569                         portage.writemsg("!!! PORTAGE_DEBUG must be either 0 or 1\n",
15570                                 noiselevel=-1)
15571                         PORTAGE_DEBUG = 0
15572         except ValueError, e:
15573                 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
15574                 portage.writemsg("!!! Unable to parse integer: PORTAGE_DEBUG='%s'\n" %\
15575                         settings["PORTAGE_DEBUG"], noiselevel=-1)
15576                 del e
15577         if "--debug" in myopts:
15578                 PORTAGE_DEBUG = 1
15579         settings["PORTAGE_DEBUG"] = str(PORTAGE_DEBUG)
15580         settings.backup_changes("PORTAGE_DEBUG")
15581
15582         if settings.get("NOCOLOR") not in ("yes","true"):
15583                 portage.output.havecolor = 1
15584
15585         # The explicit --color < y | n > option overrides the NOCOLOR environment
15586         # variable and stdout auto-detection.
15587         if "--color" in myopts:
15588                 if "y" == myopts["--color"]:
15589                         portage.output.havecolor = 1
15590                         settings["NOCOLOR"] = "false"
15591                 else:
15592                         portage.output.havecolor = 0
15593                         settings["NOCOLOR"] = "true"
15594                 settings.backup_changes("NOCOLOR")
15595         elif not sys.stdout.isatty() and settings.get("NOCOLOR") != "no":
15596                 portage.output.havecolor = 0
15597                 settings["NOCOLOR"] = "true"
15598                 settings.backup_changes("NOCOLOR")
15599
15600 def apply_priorities(settings):
15601         ionice(settings)
15602         nice(settings)
15603
15604 def nice(settings):
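        """Renice the current process according to PORTAGE_NICENESS."""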
15605         try:
15606                 os.nice(int(settings.get("PORTAGE_NICENESS", "0")))
15607         except (OSError, ValueError), e:
15608                 out = portage.output.EOutput()
15609                 out.eerror("Failed to change nice value to '%s'" % \
15610                         settings["PORTAGE_NICENESS"])
15611                 out.eerror("%s\n" % str(e))
15612
15613 def ionice(settings):
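        """
        Spawn PORTAGE_IONICE_COMMAND (if set) with ${PID} expanded to the
        current process id, in order to adjust I/O scheduling priority.
        """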
15614
15615         ionice_cmd = settings.get("PORTAGE_IONICE_COMMAND")
15616         if ionice_cmd:
15617                 ionice_cmd = shlex.split(ionice_cmd)
15618         if not ionice_cmd:
15619                 return
15620
15621         from portage.util import varexpand
15622         variables = {"PID" : str(os.getpid())}
15623         cmd = [varexpand(x, mydict=variables) for x in ionice_cmd]
15624
15625         try:
15626                 rval = portage.process.spawn(cmd, env=os.environ)
15627         except portage.exception.CommandNotFound:
15628                 # The OS kernel probably doesn't support ionice,
15629                 # so return silently.
15630                 return
15631
15632         if rval != os.EX_OK:
15633                 out = portage.output.EOutput()
15634                 out.eerror("PORTAGE_IONICE_COMMAND returned %d" % (rval,))
15635                 out.eerror("See the make.conf(5) man page for PORTAGE_IONICE_COMMAND usage instructions.")
15636
15637 def display_missing_pkg_set(root_config, set_name):
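        """
        Log an error explaining that no set satisfies set_name and list
        the sets that are actually defined for this root.
        """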
15638
15639         msg = []
15640         msg.append(("emerge: There are no sets to satisfy '%s'. " + \
15641                 "The following sets exist:") % \
15642                 colorize("INFORM", set_name))
15643         msg.append("")
15644
15645         for s in sorted(root_config.sets):
15646                 msg.append("    %s" % s)
15647         msg.append("")
15648
15649         writemsg_level("".join("%s\n" % l for l in msg),
15650                 level=logging.ERROR, noiselevel=-1)
15651
15652 def expand_set_arguments(myfiles, myaction, root_config):
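        """
        Expand SETPREFIX arguments in myfiles into their member atoms,
        handling {key=value} set options and the /@ (intersection),
        -@ (difference) and +@ (union) operators. Returns a tuple of
        (new argument list, exit status); the argument list is None if a
        referenced set does not exist. When myaction is None, plain set
        arguments are left unexpanded so the depgraph can expand them.
        """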
15653         retval = os.EX_OK
15654         setconfig = root_config.setconfig
15655
15656         sets = setconfig.getSets()
15657
15658         # In order to know exactly which atoms/sets should be added to the
15659         # world file, the depgraph performs set expansion later. It will get
15660         # confused about where the atoms came from if it's not allowed to
15661         # expand them itself.
15662         do_not_expand = (None, )
15663         newargs = []
15664         for a in myfiles:
15665                 if a in ("system", "world"):
15666                         newargs.append(SETPREFIX+a)
15667                 else:
15668                         newargs.append(a)
15669         myfiles = newargs
15670         del newargs
15671         newargs = []
15672
15673         # separators for set arguments
15674         ARG_START = "{"
15675         ARG_END = "}"
15676
15677         # WARNING: all operators must be of equal length
15678         IS_OPERATOR = "/@"
15679         DIFF_OPERATOR = "-@"
15680         UNION_OPERATOR = "+@"
15681         
15682         for i in range(0, len(myfiles)):
15683                 if myfiles[i].startswith(SETPREFIX):
15684                         start = 0
15685                         end = 0
15686                         x = myfiles[i][len(SETPREFIX):]
15687                         newset = ""
15688                         while x:
15689                                 start = x.find(ARG_START)
15690                                 end = x.find(ARG_END)
15691                                 if start > 0 and start < end:
15692                                         namepart = x[:start]
15693                                         argpart = x[start+1:end]
15694                                 
15695                                         # TODO: implement proper quoting
15696                                         args = argpart.split(",")
15697                                         options = {}
15698                                         for a in args:
15699                                                 if "=" in a:
15700                                                         k, v  = a.split("=", 1)
15701                                                         options[k] = v
15702                                                 else:
15703                                                         options[a] = "True"
15704                                         setconfig.update(namepart, options)
15705                                         newset += (x[:start-len(namepart)]+namepart)
15706                                         x = x[end+len(ARG_END):]
15707                                 else:
15708                                         newset += x
15709                                         x = ""
15710                         myfiles[i] = SETPREFIX+newset
15711                                 
15712         sets = setconfig.getSets()
15713
15714         # display errors that occurred while loading the SetConfig instance
15715         for e in setconfig.errors:
15716                 print colorize("BAD", "Error during set creation: %s" % e)
15717         
15718         # emerge relies on the existence of sets with names "world" and "system"
15719         required_sets = ("world", "system")
15720         missing_sets = []
15721
15722         for s in required_sets:
15723                 if s not in sets:
15724                         missing_sets.append(s)
15725         if missing_sets:
15726                 if len(missing_sets) > 2:
15727                         missing_sets_str = ", ".join('"%s"' % s for s in missing_sets[:-1])
15728                         missing_sets_str += ', and "%s"' % missing_sets[-1]
15729                 elif len(missing_sets) == 2:
15730                         missing_sets_str = '"%s" and "%s"' % tuple(missing_sets)
15731                 else:
15732                         missing_sets_str = '"%s"' % missing_sets[-1]
15733                 msg = ["emerge: incomplete set configuration, " + \
15734                         "missing set(s): %s" % missing_sets_str]
15735                 if sets:
15736                         msg.append("        sets defined: %s" % ", ".join(sets))
15737                 msg.append("        This usually means that '%s'" % \
15738                         (os.path.join(portage.const.GLOBAL_CONFIG_PATH, "sets.conf"),))
15739                 msg.append("        is missing or corrupt.")
15740                 for line in msg:
15741                         writemsg_level(line + "\n", level=logging.ERROR, noiselevel=-1)
15742                 return (None, 1)
15743         unmerge_actions = ("unmerge", "prune", "clean", "depclean")
15744
15745         for a in myfiles:
15746                 if a.startswith(SETPREFIX):
15747                         # support simple set operations (intersection, difference and union)
15748                         # on the commandline. Expressions are evaluated strictly left-to-right
15749                         if IS_OPERATOR in a or DIFF_OPERATOR in a or UNION_OPERATOR in a:
15750                                 expression = a[len(SETPREFIX):]
15751                                 expr_sets = []
15752                                 expr_ops = []
15753                                 while IS_OPERATOR in expression or DIFF_OPERATOR in expression or UNION_OPERATOR in expression:
15754                                         is_pos = expression.rfind(IS_OPERATOR)
15755                                         diff_pos = expression.rfind(DIFF_OPERATOR)
15756                                         union_pos = expression.rfind(UNION_OPERATOR)
15757                                         op_pos = max(is_pos, diff_pos, union_pos)
15758                                         s1 = expression[:op_pos]
15759                                         s2 = expression[op_pos+len(IS_OPERATOR):]
15760                                         op = expression[op_pos:op_pos+len(IS_OPERATOR)]
15761                                         if s2 not in sets:
15762                                                 display_missing_pkg_set(root_config, s2)
15763                                                 return (None, 1)
15764                                         expr_sets.insert(0, s2)
15765                                         expr_ops.insert(0, op)
15766                                         expression = s1
15767                                 if expression not in sets:
15768                                         display_missing_pkg_set(root_config, expression)
15769                                         return (None, 1)
15770                                 expr_sets.insert(0, expression)
15771                                 result = set(setconfig.getSetAtoms(expression))
15772                                 for i in range(0, len(expr_ops)):
15773                                         s2 = setconfig.getSetAtoms(expr_sets[i+1])
15774                                         if expr_ops[i] == IS_OPERATOR:
15775                                                 result.intersection_update(s2)
15776                                         elif expr_ops[i] == DIFF_OPERATOR:
15777                                                 result.difference_update(s2)
15778                                         elif expr_ops[i] == UNION_OPERATOR:
15779                                                 result.update(s2)
15780                                         else:
15781                                                 raise NotImplementedError("unknown set operator %s" % expr_ops[i])
15782                                 newargs.extend(result)
15783                         else:                   
15784                                 s = a[len(SETPREFIX):]
15785                                 if s not in sets:
15786                                         display_missing_pkg_set(root_config, s)
15787                                         return (None, 1)
15788                                 setconfig.active.append(s)
15789                                 try:
15790                                         set_atoms = setconfig.getSetAtoms(s)
15791                                 except portage.exception.PackageSetNotFound, e:
15792                                         writemsg_level(("emerge: the given set '%s' " + \
15793                                                 "contains a non-existent set named '%s'.\n") % \
15794                                                 (s, e), level=logging.ERROR, noiselevel=-1)
15795                                         return (None, 1)
15796                                 if myaction in unmerge_actions and \
15797                                                 not sets[s].supportsOperation("unmerge"):
15798                                         sys.stderr.write("emerge: the given set '%s' does " % s + \
15799                                                 "not support unmerge operations\n")
15800                                         retval = 1
15801                                 elif not set_atoms:
15802                                         print "emerge: '%s' is an empty set" % s
15803                                 elif myaction not in do_not_expand:
15804                                         newargs.extend(set_atoms)
15805                                 else:
15806                                         newargs.append(SETPREFIX+s)
15807                                 for e in sets[s].errors:
15808                                         print e
15809                 else:
15810                         newargs.append(a)
15811         return (newargs, retval)
15812
15813 def repo_name_check(trees):
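        """
        Warn about repositories that are missing a profiles/repo_name
        entry. Returns True if any such repository was found.
        """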
15814         missing_repo_names = set()
15815         for root, root_trees in trees.iteritems():
15816                 if "porttree" in root_trees:
15817                         portdb = root_trees["porttree"].dbapi
15818                         missing_repo_names.update(portdb.porttrees)
15819                         repos = portdb.getRepositories()
15820                         for r in repos:
15821                                 missing_repo_names.discard(portdb.getRepositoryPath(r))
15822                         if portdb.porttree_root in missing_repo_names and \
15823                                 not os.path.exists(os.path.join(
15824                                 portdb.porttree_root, "profiles")):
15825                                 # This is normal if $PORTDIR happens to be empty,
15826                                 # so don't warn about it.
15827                                 missing_repo_names.remove(portdb.porttree_root)
15828
15829         if missing_repo_names:
15830                 msg = []
15831                 msg.append("WARNING: One or more repositories " + \
15832                         "have missing repo_name entries:")
15833                 msg.append("")
15834                 for p in missing_repo_names:
15835                         msg.append("\t%s/profiles/repo_name" % (p,))
15836                 msg.append("")
15837                 msg.extend(textwrap.wrap("NOTE: Each repo_name entry " + \
15838                         "should be a plain text file containing a unique " + \
15839                         "name for the repository on the first line.", 70))
15840                 writemsg_level("".join("%s\n" % l for l in msg),
15841                         level=logging.WARNING, noiselevel=-1)
15842
15843         return bool(missing_repo_names)
15844
15845 def repo_name_duplicate_check(trees):
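        """
        Warn about repositories that were ignored because of duplicate
        profiles/repo_name entries. Returns True if any were ignored.
        """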
15846         ignored_repos = {}
15847         for root, root_trees in trees.iteritems():
15848                 if 'porttree' in root_trees:
15849                         portdb = root_trees['porttree'].dbapi
15850                         if portdb.mysettings.get('PORTAGE_REPO_DUPLICATE_WARN') != '0':
15851                                 for repo_name, paths in portdb._ignored_repos:
15852                                         k = (root, repo_name, portdb.getRepositoryPath(repo_name))
15853                                         ignored_repos.setdefault(k, []).extend(paths)
15854
15855         if ignored_repos:
15856                 msg = []
15857                 msg.append('WARNING: One or more repositories ' + \
15858                         'have been ignored due to duplicate')
15859                 msg.append('  profiles/repo_name entries:')
15860                 msg.append('')
15861                 for k in sorted(ignored_repos):
15862                         msg.append('  %s overrides' % (k,))
15863                         for path in ignored_repos[k]:
15864                                 msg.append('    %s' % (path,))
15865                         msg.append('')
15866                 msg.extend('  ' + x for x in textwrap.wrap(
15867                         "All profiles/repo_name entries must be unique in order " + \
15868                         "to avoid having duplicates ignored. " + \
15869                         "Set PORTAGE_REPO_DUPLICATE_WARN=\"0\" in " + \
15870                         "/etc/make.conf if you would like to disable this warning."))
15871                 writemsg_level(''.join('%s\n' % l for l in msg),
15872                         level=logging.WARNING, noiselevel=-1)
15873
15874         return bool(ignored_repos)
15875
15876 def config_protect_check(trees):
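        """Warn if CONFIG_PROTECT is empty for any configured root."""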
15877         for root, root_trees in trees.iteritems():
15878                 if not root_trees["root_config"].settings.get("CONFIG_PROTECT"):
15879                         msg = "!!! CONFIG_PROTECT is empty"
15880                         if root != "/":
15881                                 msg += " for '%s'" % root
15882                         writemsg_level(msg + "\n", level=logging.WARNING, noiselevel=-1)
15883
15884 def ambiguous_package_name(arg, atoms, root_config, spinner, myopts):
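        """
        Tell the user that a short ebuild name matches several packages and
        list the fully-qualified names, using the search output unless
        --quiet is in effect.
        """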
15885
15886         if "--quiet" in myopts:
15887                 print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
15888                 print "!!! one of the following fully-qualified ebuild names instead:\n"
15889                 for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
15890                         print "    " + colorize("INFORM", cp)
15891                 return
15892
15893         s = search(root_config, spinner, "--searchdesc" in myopts,
15894                 "--quiet" not in myopts, "--usepkg" in myopts,
15895                 "--usepkgonly" in myopts)
15896         null_cp = portage.dep_getkey(insert_category_into_atom(
15897                 arg, "null"))
15898         cat, atom_pn = portage.catsplit(null_cp)
15899         s.searchkey = atom_pn
15900         for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
15901                 s.addCP(cp)
15902         s.output()
15903         print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
15904         print "!!! one of the above fully-qualified ebuild names instead.\n"
15905
15906 def profile_check(trees, myaction, myopts):
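        """
        Return os.EX_OK if every root has a valid profile or if the
        requested action does not need one; otherwise log an error and
        return 1.
        """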
15907         if myaction in ("info", "sync"):
15908                 return os.EX_OK
15909         elif "--version" in myopts or "--help" in myopts:
15910                 return os.EX_OK
15911         for root, root_trees in trees.iteritems():
15912                 if root_trees["root_config"].settings.profiles:
15913                         continue
15914                 # generate some profile related warning messages
15915                 validate_ebuild_environment(trees)
15916                 msg = "If you have just changed your profile configuration, you " + \
15917                         "should revert back to the previous configuration. Due to " + \
15918                         "your current profile being invalid, allowed actions are " + \
15919                         "limited to --help, --info, --sync, and --version."
15920                 writemsg_level("".join("!!! %s\n" % l for l in textwrap.wrap(msg, 70)),
15921                         level=logging.ERROR, noiselevel=-1)
15922                 return 1
15923         return os.EX_OK
15924
15925 def emerge_main():
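        """
        Main entry point for emerge: parse options, load the configuration
        and dispatch to the appropriate action handler, returning an exit
        code.
        """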
15926         global portage  # unclear why this is necessary now - genone
15927         portage._disable_legacy_globals()
15928         # Disable color until we're sure that it should be enabled (after
15929         # EMERGE_DEFAULT_OPTS has been parsed).
15930         portage.output.havecolor = 0
15931         # This first pass is just for options that need to be known as early as
15932         # possible, such as --config-root.  They will be parsed again later,
15933         # together with EMERGE_DEFAULT_OPTS (which may vary depending on
15934         # the value of --config-root).
15935         myaction, myopts, myfiles = parse_opts(sys.argv[1:], silent=True)
15936         if "--debug" in myopts:
15937                 os.environ["PORTAGE_DEBUG"] = "1"
15938         if "--config-root" in myopts:
15939                 os.environ["PORTAGE_CONFIGROOT"] = myopts["--config-root"]
15940         if "--root" in myopts:
15941                 os.environ["ROOT"] = myopts["--root"]
15942
15943         # Portage needs to ensure a sane umask for the files it creates.
15944         os.umask(022)
15945         settings, trees, mtimedb = load_emerge_config()
15946         portdb = trees[settings["ROOT"]]["porttree"].dbapi
15947         rval = profile_check(trees, myaction, myopts)
15948         if rval != os.EX_OK:
15949                 return rval
15950
15951         if portage._global_updates(trees, mtimedb["updates"]):
15952                 mtimedb.commit()
15953                 # Reload the whole config from scratch.
15954                 settings, trees, mtimedb = load_emerge_config(trees=trees)
15955                 portdb = trees[settings["ROOT"]]["porttree"].dbapi
15956
15957         xterm_titles = "notitles" not in settings.features
15958
15959         tmpcmdline = []
15960         if "--ignore-default-opts" not in myopts:
15961                 tmpcmdline.extend(settings["EMERGE_DEFAULT_OPTS"].split())
15962         tmpcmdline.extend(sys.argv[1:])
15963         myaction, myopts, myfiles = parse_opts(tmpcmdline)
15964
15965         if "--digest" in myopts:
15966                 os.environ["FEATURES"] = os.environ.get("FEATURES","") + " digest"
15967                 # Reload the whole config from scratch so that the portdbapi internal
15968                 # config is updated with new FEATURES.
15969                 settings, trees, mtimedb = load_emerge_config(trees=trees)
15970                 portdb = trees[settings["ROOT"]]["porttree"].dbapi
15971
15972         for myroot in trees:
15973                 mysettings = trees[myroot]["vartree"].settings
15974                 mysettings.unlock()
15975                 adjust_config(myopts, mysettings)
15976                 if '--pretend' not in myopts and myaction in \
15977                         (None, 'clean', 'depclean', 'prune', 'unmerge'):
15978                         mysettings["PORTAGE_COUNTER_HASH"] = \
15979                                 trees[myroot]["vartree"].dbapi._counter_hash()
15980                         mysettings.backup_changes("PORTAGE_COUNTER_HASH")
15981                 mysettings.lock()
15982                 del myroot, mysettings
15983
15984         apply_priorities(settings)
15985
15986         spinner = stdout_spinner()
15987         if "candy" in settings.features:
15988                 spinner.update = spinner.update_scroll
15989
15990         if "--quiet" not in myopts:
15991                 portage.deprecated_profile_check(settings=settings)
15992                 repo_name_check(trees)
15993                 repo_name_duplicate_check(trees)
15994                 config_protect_check(trees)
15995
15996         for mytrees in trees.itervalues():
15997                 mydb = mytrees["porttree"].dbapi
15998                 # Freeze the portdbapi for performance (memoize all xmatch results).
15999                 mydb.freeze()
16000         del mytrees, mydb
16001
16002         if "moo" in myfiles:
16003                 print """
16004
16005   Larry loves Gentoo (""" + platform.system() + """)
16006
16007  _______________________
16008 < Have you mooed today? >
16009  -----------------------
16010         \   ^__^
16011          \  (oo)\_______
16012             (__)\       )\/\ 
16013                 ||----w |
16014                 ||     ||
16015
16016 """
16017
16018         for x in myfiles:
16019                 ext = os.path.splitext(x)[1]
16020                 if (ext == ".ebuild" or ext == ".tbz2") and os.path.exists(os.path.abspath(x)):
16021                         print colorize("BAD", "\n*** emerging by path is broken and may not always work!!!\n")
16022                         break
16023
16024         root_config = trees[settings["ROOT"]]["root_config"]
16025         if myaction == "list-sets":
16026                 sys.stdout.write("".join("%s\n" % s for s in sorted(root_config.sets)))
16027                 sys.stdout.flush()
16028                 return os.EX_OK
16029
16030         # only expand sets for actions taking package arguments
16031         oldargs = myfiles[:]
16032         if myaction in ("clean", "config", "depclean", "info", "prune", "unmerge", None):
16033                 myfiles, retval = expand_set_arguments(myfiles, myaction, root_config)
16034                 if retval != os.EX_OK:
16035                         return retval
16036
16037                 # Need to handle empty sets specially, otherwise emerge will react 
16038                 # with the help message for empty argument lists
16039                 if oldargs and not myfiles:
16040                         print "emerge: no targets left after set expansion"
16041                         return 0
16042
16043         if ("--tree" in myopts) and ("--columns" in myopts):
16044                 print "emerge: can't specify both of \"--tree\" and \"--columns\"."
16045                 return 1
16046
16047         if ("--quiet" in myopts):
16048                 spinner.update = spinner.update_quiet
16049                 portage.util.noiselimit = -1
16050
16051         # Always create packages if FEATURES=buildpkg
16052         # Imply --buildpkg if --buildpkgonly
16053         if ("buildpkg" in settings.features) or ("--buildpkgonly" in myopts):
16054                 if "--buildpkg" not in myopts:
16055                         myopts["--buildpkg"] = True
16056
16057         # Always try and fetch binary packages if FEATURES=getbinpkg
16058         if ("getbinpkg" in settings.features):
16059                 myopts["--getbinpkg"] = True
16060
16061         if "--buildpkgonly" in myopts:
16062                 # --buildpkgonly will not merge anything, so
16063                 # it cancels all binary package options.
16064                 for opt in ("--getbinpkg", "--getbinpkgonly",
16065                         "--usepkg", "--usepkgonly"):
16066                         myopts.pop(opt, None)
16067
16068         if "--fetch-all-uri" in myopts:
16069                 myopts["--fetchonly"] = True
16070
16071         if "--skipfirst" in myopts and "--resume" not in myopts:
16072                 myopts["--resume"] = True
16073
16074         if ("--getbinpkgonly" in myopts) and not ("--usepkgonly" in myopts):
16075                 myopts["--usepkgonly"] = True
16076
16077         if ("--getbinpkgonly" in myopts) and not ("--getbinpkg" in myopts):
16078                 myopts["--getbinpkg"] = True
16079
16080         if ("--getbinpkg" in myopts) and not ("--usepkg" in myopts):
16081                 myopts["--usepkg"] = True
16082
16083         # Also allow -K to apply --usepkg/-k
16084         if ("--usepkgonly" in myopts) and not ("--usepkg" in myopts):
16085                 myopts["--usepkg"] = True
16086
16087         # Allow -p to remove --ask
16088         if ("--pretend" in myopts) and ("--ask" in myopts):
16089                 print ">>> --pretend disables --ask... removing --ask from options."
16090                 del myopts["--ask"]
16091
16092         # forbid --ask when not in a terminal
16093         # note: this breaks `emerge --ask | tee logfile`, but that doesn't work anyway.
16094         if ("--ask" in myopts) and (not sys.stdin.isatty()):
16095                 portage.writemsg("!!! \"--ask\" should only be used in a terminal. Exiting.\n",
16096                         noiselevel=-1)
16097                 return 1
16098
16099         if settings.get("PORTAGE_DEBUG", "") == "1":
16100                 spinner.update = spinner.update_quiet
16101                 portage.debug=1
16102                 if "python-trace" in settings.features:
16103                         import portage.debug
16104                         portage.debug.set_trace(True)
16105
16106         if "--quiet" not in myopts:
16107                 if not sys.stdout.isatty() or ("--nospinner" in myopts):
16108                         spinner.update = spinner.update_basic
16109
16110         if myaction == 'version':
16111                 print getportageversion(settings["PORTDIR"], settings["ROOT"],
16112                         settings.profile_path, settings["CHOST"],
16113                         trees[settings["ROOT"]]["vartree"].dbapi)
16114                 return 0
16115         elif "--help" in myopts:
16116                 _emerge.help.help(myaction, myopts, portage.output.havecolor)
16117                 return 0
16118
16119         if "--debug" in myopts:
16120                 print "myaction", myaction
16121                 print "myopts", myopts
16122
16123         if not myaction and not myfiles and "--resume" not in myopts:
16124                 _emerge.help.help(myaction, myopts, portage.output.havecolor)
16125                 return 1
16126
16127         pretend = "--pretend" in myopts
16128         fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
16129         buildpkgonly = "--buildpkgonly" in myopts
16130
16131         # check if root user is the current user for the actions where emerge needs this
16132         if portage.secpass < 2:
16133                 # We've already allowed "--version" and "--help" above.
16134                 if "--pretend" not in myopts and myaction not in ("search","info"):
16135                         need_superuser = myaction in ('clean', 'depclean', 'deselect',
16136                                 'prune', 'unmerge') or not \
16137                                 (fetchonly or \
16138                                 (buildpkgonly and secpass >= 1) or \
16139                                 myaction in ("metadata", "regen") or \
16140                                 (myaction == "sync" and os.access(settings["PORTDIR"], os.W_OK)))
16141                         if portage.secpass < 1 or \
16142                                 need_superuser:
16143                                 if need_superuser:
16144                                         access_desc = "superuser"
16145                                 else:
16146                                         access_desc = "portage group"
16147                                 # Always show portage_group_warning() when only portage group
16148                                 # access is required but the user is not in the portage group.
16149                                 from portage.data import portage_group_warning
16150                                 if "--ask" in myopts:
16151                                         myopts["--pretend"] = True
16152                                         del myopts["--ask"]
16153                                         print ("%s access is required... " + \
16154                                                 "adding --pretend to options.\n") % access_desc
16155                                         if portage.secpass < 1 and not need_superuser:
16156                                                 portage_group_warning()
16157                                 else:
16158                                         sys.stderr.write(("emerge: %s access is " + \
16159                                                 "required.\n\n") % access_desc)
16160                                         if portage.secpass < 1 and not need_superuser:
16161                                                 portage_group_warning()
16162                                         return 1
16163
16164         disable_emergelog = False
16165         for x in ("--pretend", "--fetchonly", "--fetch-all-uri"):
16166                 if x in myopts:
16167                         disable_emergelog = True
16168                         break
16169         if myaction in ("search", "info"):
16170                 disable_emergelog = True
16171         if disable_emergelog:
16172                 # Disable emergelog for everything except build or unmerge
16173                 # operations.  This helps minimize parallel emerge.log entries that can
16174                 # confuse log parsers.  We especially want it disabled during
16175                 # parallel-fetch, which uses --resume --fetchonly.
16176                 global emergelog
16177                 def emergelog(*pargs, **kargs):
16178                         pass
16179
16180         if "--pretend" not in myopts:
16181                 emergelog(xterm_titles, "Started emerge on: "+\
16182                         time.strftime("%b %d, %Y %H:%M:%S", time.localtime()))
16183                 myelogstr=""
16184                 if myopts:
16185                         myelogstr=" ".join(myopts)
16186                 if myaction:
16187                         myelogstr+=" "+myaction
16188                 if myfiles:
16189                         myelogstr += " " + " ".join(oldargs)
16190                 emergelog(xterm_titles, " *** emerge " + myelogstr)
16191         del oldargs
16192
16193         def emergeexitsig(signum, frame):
16194                 signal.signal(signal.SIGINT, signal.SIG_IGN)
16195                 signal.signal(signal.SIGTERM, signal.SIG_IGN)
16196                 portage.util.writemsg("\n\nExiting on signal %(signal)s\n" % {"signal":signum})
16197                 sys.exit(100+signum)
16198         signal.signal(signal.SIGINT, emergeexitsig)
16199         signal.signal(signal.SIGTERM, emergeexitsig)
16200
16201         def emergeexit():
16202                 """This gets our final log message in before we quit."""
16203                 if "--pretend" not in myopts:
16204                         emergelog(xterm_titles, " *** terminating.")
16205                 if "notitles" not in settings.features:
16206                         xtermTitleReset()
16207         portage.atexit_register(emergeexit)
16208
16209         if myaction in ("config", "metadata", "regen", "sync"):
16210                 if "--pretend" in myopts:
16211                         sys.stderr.write(("emerge: The '%s' action does " + \
16212                                 "not support '--pretend'.\n") % myaction)
16213                         return 1
16214
16215         if "sync" == myaction:
16216                 return action_sync(settings, trees, mtimedb, myopts, myaction)
16217         elif "metadata" == myaction:
16218                 action_metadata(settings, portdb, myopts)
16219         elif myaction=="regen":
16220                 validate_ebuild_environment(trees)
16221                 return action_regen(settings, portdb, myopts.get("--jobs"),
16222                         myopts.get("--load-average"))
16223         # CONFIG action
16224         elif "config"==myaction:
16225                 validate_ebuild_environment(trees)
16226                 action_config(settings, trees, myopts, myfiles)
16227
16228         # SEARCH action
16229         elif "search"==myaction:
16230                 validate_ebuild_environment(trees)
16231                 action_search(trees[settings["ROOT"]]["root_config"],
16232                         myopts, myfiles, spinner)
16233
16234         elif myaction in ('clean', 'depclean', 'deselect', 'prune', 'unmerge'):
16235                 validate_ebuild_environment(trees)
16236                 rval = action_uninstall(settings, trees, mtimedb["ldpath"],
16237                         myopts, myaction, myfiles, spinner)
16238                 if not (myaction == 'deselect' or buildpkgonly or fetchonly or pretend):
16239                         post_emerge(root_config, myopts, mtimedb, rval)
16240                 return rval
16241
16242         elif myaction == 'info':
16243
16244                 # Ensure atoms are valid before calling action_info().
16245                 vardb = trees[settings["ROOT"]]["vartree"].dbapi
16246                 valid_atoms = []
16247                 for x in myfiles:
16248                         if is_valid_package_atom(x):
16249                                 try:
16250                                         valid_atoms.append(
16251                                                 portage.dep_expand(x, mydb=vardb, settings=settings))
16252                                 except portage.exception.AmbiguousPackageName, e:
16253                                         msg = "The short ebuild name \"" + x + \
16254                                                 "\" is ambiguous.  Please specify " + \
16255                                                 "one of the following " + \
16256                                                 "fully-qualified ebuild names instead:"
16257                                         for line in textwrap.wrap(msg, 70):
16258                                                 writemsg_level("!!! %s\n" % (line,),
16259                                                         level=logging.ERROR, noiselevel=-1)
16260                                         for i in e[0]:
16261                                                 writemsg_level("    %s\n" % colorize("INFORM", i),
16262                                                         level=logging.ERROR, noiselevel=-1)
16263                                         writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
16264                                         return 1
16265                                 continue
16266                         msg = []
16267                         msg.append("'%s' is not a valid package atom." % (x,))
16268                         msg.append("Please check ebuild(5) for full details.")
16269                         writemsg_level("".join("!!! %s\n" % line for line in msg),
16270                                 level=logging.ERROR, noiselevel=-1)
16271                         return 1
16272
16273                 return action_info(settings, trees, myopts, valid_atoms)
16274
16275         # "update", "system", or just process files:
16276         else:
16277                 validate_ebuild_environment(trees)
16278
16279                 for x in myfiles:
16280                         if x.startswith(SETPREFIX) or \
16281                                 is_valid_package_atom(x):
16282                                 continue
16283                         if x[:1] == os.sep:
16284                                 continue
16285                         try:
16286                                 os.lstat(x)
16287                                 continue
16288                         except OSError:
16289                                 pass
16290                         msg = []
16291                         msg.append("'%s' is not a valid package atom." % (x,))
16292                         msg.append("Please check ebuild(5) for full details.")
16293                         writemsg_level("".join("!!! %s\n" % line for line in msg),
16294                                 level=logging.ERROR, noiselevel=-1)
16295                         return 1
16296
16297                 if "--pretend" not in myopts:
16298                         display_news_notification(root_config, myopts)
16299                 retval = action_build(settings, trees, mtimedb,
16300                         myopts, myaction, myfiles, spinner)
16301                 root_config = trees[settings["ROOT"]]["root_config"]
16302                 post_emerge(root_config, myopts, mtimedb, retval)
16303
16304                 return retval