Fix DepPriority.__int__() to return distinguishable values, for use when
[portage.git] / pym / _emerge / __init__.py
1 #!/usr/bin/python -O
2 # Copyright 1999-2006 Gentoo Foundation
3 # Distributed under the terms of the GNU General Public License v2
4 # $Id$
5
6 import array
7 import codecs
8 from collections import deque
9 import fcntl
10 import formatter
11 import logging
12 import pwd
13 import select
14 import shlex
15 import shutil
16 import signal
17 import sys
18 import textwrap
19 import urlparse
20 import weakref
21 import gc
22 import os, stat
23 import platform
24
25 try:
26         import portage
27 except ImportError:
28         from os import path as osp
29         sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
30         import portage
31
32 from portage import digraph
33 from portage.const import NEWS_LIB_PATH
34
35 import _emerge.help
36 import portage.xpak, commands, errno, re, socket, time
37 from portage.output import blue, bold, colorize, darkblue, darkgreen, darkred, green, \
38         nc_len, red, teal, turquoise, xtermTitle, \
39         xtermTitleReset, yellow
40 from portage.output import create_color_func
41 good = create_color_func("GOOD")
42 bad = create_color_func("BAD")
43 # white looks bad on terminals with white background
44 from portage.output import bold as white
45
46 import portage.elog
47 import portage.dep
48 portage.dep._dep_check_strict = True
49 import portage.util
50 import portage.locks
51 import portage.exception
52 from portage.cache.cache_errors import CacheError
53 from portage.data import secpass
54 from portage.elog.messages import eerror
55 from portage.util import normalize_path as normpath
56 from portage.util import cmp_sort_key, writemsg, writemsg_level
57 from portage.sets import load_default_config, SETPREFIX
58 from portage.sets.base import InternalPackageSet
59
60 from itertools import chain, izip
61
62 try:
63         import cPickle as pickle
64 except ImportError:
65         import pickle
66
67 try:
68         from cStringIO import StringIO
69 except ImportError:
70         from StringIO import StringIO
71
72 class stdout_spinner(object):
73         scroll_msgs = [
74                 "Gentoo Rocks ("+platform.system()+")",
75                 "Thank you for using Gentoo. :)",
76                 "Are you actually trying to read this?",
77                 "How many times have you stared at this?",
78                 "We are generating the cache right now",
79                 "You are paying too much attention.",
80                 "A theory is better than its explanation.",
81                 "Phasers locked on target, Captain.",
82                 "Thrashing is just virtual crashing.",
83                 "To be is to program.",
84                 "Real Users hate Real Programmers.",
85                 "When all else fails, read the instructions.",
86                 "Functionality breeds Contempt.",
87                 "The future lies ahead.",
88                 "3.1415926535897932384626433832795028841971694",
89                 "Sometimes insanity is the only alternative.",
90                 "Inaccuracy saves a world of explanation.",
91         ]
92
93         twirl_sequence = "/-\\|/-\\|/-\\|/-\\|\\-/|\\-/|\\-/|\\-/|"
94
95         def __init__(self):
96                 self.spinpos = 0
97                 self.update = self.update_twirl
98                 self.scroll_sequence = self.scroll_msgs[
99                         int(time.time() * 100) % len(self.scroll_msgs)]
100                 self.last_update = 0
101                 self.min_display_latency = 0.05
102
103         def _return_early(self):
104                 """
105                 Flushing ouput to the tty too frequently wastes cpu time. Therefore,
106                 each update* method should return without doing any output when this
107                 method returns True.
108                 """
109                 cur_time = time.time()
110                 if cur_time - self.last_update < self.min_display_latency:
111                         return True
112                 self.last_update = cur_time
113                 return False
114
115         def update_basic(self):
116                 self.spinpos = (self.spinpos + 1) % 500
117                 if self._return_early():
118                         return
119                 if (self.spinpos % 100) == 0:
120                         if self.spinpos == 0:
121                                 sys.stdout.write(". ")
122                         else:
123                                 sys.stdout.write(".")
124                 sys.stdout.flush()
125
126         def update_scroll(self):
127                 if self._return_early():
128                         return
129                 if(self.spinpos >= len(self.scroll_sequence)):
130                         sys.stdout.write(darkgreen(" \b\b\b" + self.scroll_sequence[
131                                 len(self.scroll_sequence) - 1 - (self.spinpos % len(self.scroll_sequence))]))
132                 else:
133                         sys.stdout.write(green("\b " + self.scroll_sequence[self.spinpos]))
134                 sys.stdout.flush()
135                 self.spinpos = (self.spinpos + 1) % (2 * len(self.scroll_sequence))
136
137         def update_twirl(self):
138                 self.spinpos = (self.spinpos + 1) % len(self.twirl_sequence)
139                 if self._return_early():
140                         return
141                 sys.stdout.write("\b\b " + self.twirl_sequence[self.spinpos])
142                 sys.stdout.flush()
143
144         def update_quiet(self):
145                 return
146
147 def userquery(prompt, responses=None, colours=None):
148         """Displays a prompt and a set of responses, then waits for a response
149         which is checked against the responses and the first to match is
150         returned.  An empty response will match the first value in responses.  The
151         input buffer is *not* cleared prior to the prompt!
152
153         prompt: a String.
154         responses: a List of Strings.
155         colours: a List of Functions taking and returning a String, used to
156         process the responses for display. Typically these will be functions
157         like red() but could be e.g. lambda x: "DisplayString".
158         If responses is omitted, defaults to ["Yes", "No"], [green, red].
159         If only colours is omitted, defaults to [bold, ...].
160
161         Returns a member of the List responses. (If called without optional
162         arguments, returns "Yes" or "No".)
163         KeyboardInterrupt is converted to SystemExit to avoid tracebacks being
164         printed."""
165         if responses is None:
166                 responses = ["Yes", "No"]
167                 colours = [
168                         create_color_func("PROMPT_CHOICE_DEFAULT"),
169                         create_color_func("PROMPT_CHOICE_OTHER")
170                 ]
171         elif colours is None:
172                 colours=[bold]
173         colours=(colours*len(responses))[:len(responses)]
174         print bold(prompt),
175         try:
176                 while True:
177                         response=raw_input("["+"/".join([colours[i](responses[i]) for i in range(len(responses))])+"] ")
178                         for key in responses:
179                                 # An empty response will match the first value in responses.
180                                 if response.upper()==key[:len(response)].upper():
181                                         return key
182                         print "Sorry, response '%s' not understood." % response,
183         except (EOFError, KeyboardInterrupt):
184                 print "Interrupted."
185                 sys.exit(1)
186
187 actions = frozenset([
188 "clean", "config", "depclean",
189 "info", "list-sets", "metadata",
190 "prune", "regen",  "search",
191 "sync",  "unmerge", "version",
192 ])
193 options=[
194 "--ask",          "--alphabetical",
195 "--buildpkg",     "--buildpkgonly",
196 "--changelog",    "--columns",
197 "--complete-graph",
198 "--debug",        "--deep",
199 "--digest",
200 "--emptytree",
201 "--fetchonly",    "--fetch-all-uri",
202 "--getbinpkg",    "--getbinpkgonly",
203 "--help",         "--ignore-default-opts",
204 "--keep-going",
205 "--noconfmem",
206 "--newuse",
207 "--nodeps",       "--noreplace",
208 "--nospinner",    "--oneshot",
209 "--onlydeps",     "--pretend",
210 "--quiet",        "--resume",
211 "--searchdesc",   "--selective",
212 "--skipfirst",
213 "--tree",
214 "--update",
215 "--usepkg",       "--usepkgonly",
216 "--verbose",
217 ]
218
219 shortmapping={
220 "1":"--oneshot",
221 "a":"--ask",
222 "b":"--buildpkg",  "B":"--buildpkgonly",
223 "c":"--clean",     "C":"--unmerge",
224 "d":"--debug",     "D":"--deep",
225 "e":"--emptytree",
226 "f":"--fetchonly", "F":"--fetch-all-uri",
227 "g":"--getbinpkg", "G":"--getbinpkgonly",
228 "h":"--help",
229 "k":"--usepkg",    "K":"--usepkgonly",
230 "l":"--changelog",
231 "n":"--noreplace", "N":"--newuse",
232 "o":"--onlydeps",  "O":"--nodeps",
233 "p":"--pretend",   "P":"--prune",
234 "q":"--quiet",
235 "s":"--search",    "S":"--searchdesc",
236 "t":"--tree",
237 "u":"--update",
238 "v":"--verbose",   "V":"--version"
239 }
240
241 def emergelog(xterm_titles, mystr, short_msg=None):
242         if xterm_titles and short_msg:
243                 if "HOSTNAME" in os.environ:
244                         short_msg = os.environ["HOSTNAME"]+": "+short_msg
245                 xtermTitle(short_msg)
246         try:
247                 file_path = "/var/log/emerge.log"
248                 mylogfile = open(file_path, "a")
249                 portage.util.apply_secpass_permissions(file_path,
250                         uid=portage.portage_uid, gid=portage.portage_gid,
251                         mode=0660)
252                 mylock = None
253                 try:
254                         mylock = portage.locks.lockfile(mylogfile)
255                         # seek because we may have gotten held up by the lock.
256                         # if so, we may not be positioned at the end of the file.
257                         mylogfile.seek(0, 2)
258                         mylogfile.write(str(time.time())[:10]+": "+mystr+"\n")
259                         mylogfile.flush()
260                 finally:
261                         if mylock:
262                                 portage.locks.unlockfile(mylock)
263                         mylogfile.close()
264         except (IOError,OSError,portage.exception.PortageException), e:
265                 if secpass >= 1:
266                         print >> sys.stderr, "emergelog():",e
267
268 def countdown(secs=5, doing="Starting"):
269         if secs:
270                 print ">>> Waiting",secs,"seconds before starting..."
271                 print ">>> (Control-C to abort)...\n"+doing+" in: ",
272                 ticks=range(secs)
273                 ticks.reverse()
274                 for sec in ticks:
275                         sys.stdout.write(colorize("UNMERGE_WARN", str(sec+1)+" "))
276                         sys.stdout.flush()
277                         time.sleep(1)
278                 print
279
280 # formats a size given in bytes nicely
281 def format_size(mysize):
282         if isinstance(mysize, basestring):
283                 return mysize
284         if 0 != mysize % 1024:
285                 # Always round up to the next kB so that it doesn't show 0 kB when
286                 # some small file still needs to be fetched.
287                 mysize += 1024 - mysize % 1024
288         mystr=str(mysize/1024)
289         mycount=len(mystr)
290         while (mycount > 3):
291                 mycount-=3
292                 mystr=mystr[:mycount]+","+mystr[mycount:]
293         return mystr+" kB"
294
295
296 def getgccversion(chost):
297         """
298         rtype: C{str}
299         return:  the current in-use gcc version
300         """
301
302         gcc_ver_command = 'gcc -dumpversion'
303         gcc_ver_prefix = 'gcc-'
304
305         gcc_not_found_error = red(
306         "!!! No gcc found. You probably need to 'source /etc/profile'\n" +
307         "!!! to update the environment of this terminal and possibly\n" +
308         "!!! other terminals also.\n"
309         )
310
311         mystatus, myoutput = commands.getstatusoutput("gcc-config -c")
312         if mystatus == os.EX_OK and myoutput.startswith(chost + "-"):
313                 return myoutput.replace(chost + "-", gcc_ver_prefix, 1)
314
315         mystatus, myoutput = commands.getstatusoutput(
316                 chost + "-" + gcc_ver_command)
317         if mystatus == os.EX_OK:
318                 return gcc_ver_prefix + myoutput
319
320         mystatus, myoutput = commands.getstatusoutput(gcc_ver_command)
321         if mystatus == os.EX_OK:
322                 return gcc_ver_prefix + myoutput
323
324         portage.writemsg(gcc_not_found_error, noiselevel=-1)
325         return "[unavailable]"
326
327 def getportageversion(portdir, target_root, profile, chost, vardb):
328         profilever = "unavailable"
329         if profile:
330                 realpath = os.path.realpath(profile)
331                 basepath   = os.path.realpath(os.path.join(portdir, "profiles"))
332                 if realpath.startswith(basepath):
333                         profilever = realpath[1 + len(basepath):]
334                 else:
335                         try:
336                                 profilever = "!" + os.readlink(profile)
337                         except (OSError):
338                                 pass
339                 del realpath, basepath
340
341         libcver=[]
342         libclist  = vardb.match("virtual/libc")
343         libclist += vardb.match("virtual/glibc")
344         libclist  = portage.util.unique_array(libclist)
345         for x in libclist:
346                 xs=portage.catpkgsplit(x)
347                 if libcver:
348                         libcver+=","+"-".join(xs[1:])
349                 else:
350                         libcver="-".join(xs[1:])
351         if libcver==[]:
352                 libcver="unavailable"
353
354         gccver = getgccversion(chost)
355         unameout=platform.release()+" "+platform.machine()
356
357         return "Portage " + portage.VERSION +" ("+profilever+", "+gccver+", "+libcver+", "+unameout+")"
358
359 def create_depgraph_params(myopts, myaction):
360         #configure emerge engine parameters
361         #
362         # self:      include _this_ package regardless of if it is merged.
363         # selective: exclude the package if it is merged
364         # recurse:   go into the dependencies
365         # deep:      go into the dependencies of already merged packages
366         # empty:     pretend nothing is merged
367         # complete:  completely account for all known dependencies
368         # remove:    build graph for use in removing packages
369         myparams = set(["recurse"])
370
371         if myaction == "remove":
372                 myparams.add("remove")
373                 myparams.add("complete")
374                 return myparams
375
376         if "--update" in myopts or \
377                 "--newuse" in myopts or \
378                 "--reinstall" in myopts or \
379                 "--noreplace" in myopts:
380                 myparams.add("selective")
381         if "--emptytree" in myopts:
382                 myparams.add("empty")
383                 myparams.discard("selective")
384         if "--nodeps" in myopts:
385                 myparams.discard("recurse")
386         if "--deep" in myopts:
387                 myparams.add("deep")
388         if "--complete-graph" in myopts:
389                 myparams.add("complete")
390         return myparams
391
392 # search functionality
393 class search(object):
394
395         #
396         # class constants
397         #
398         VERSION_SHORT=1
399         VERSION_RELEASE=2
400
401         #
402         # public interface
403         #
404         def __init__(self, root_config, spinner, searchdesc,
405                 verbose, usepkg, usepkgonly):
406                 """Searches the available and installed packages for the supplied search key.
407                 The list of available and installed packages is created at object instantiation.
408                 This makes successive searches faster."""
409                 self.settings = root_config.settings
410                 self.vartree = root_config.trees["vartree"]
411                 self.spinner = spinner
412                 self.verbose = verbose
413                 self.searchdesc = searchdesc
414                 self.root_config = root_config
415                 self.setconfig = root_config.setconfig
416                 self.matches = {"pkg" : []}
417                 self.mlen = 0
418
419                 def fake_portdb():
420                         pass
421                 self.portdb = fake_portdb
422                 for attrib in ("aux_get", "cp_all",
423                         "xmatch", "findname", "getFetchMap"):
424                         setattr(fake_portdb, attrib, getattr(self, "_"+attrib))
425
426                 self._dbs = []
427
428                 portdb = root_config.trees["porttree"].dbapi
429                 bindb = root_config.trees["bintree"].dbapi
430                 vardb = root_config.trees["vartree"].dbapi
431
432                 if not usepkgonly and portdb._have_root_eclass_dir:
433                         self._dbs.append(portdb)
434
435                 if (usepkg or usepkgonly) and bindb.cp_all():
436                         self._dbs.append(bindb)
437
438                 self._dbs.append(vardb)
439                 self._portdb = portdb
440
441         def _cp_all(self):
442                 cp_all = set()
443                 for db in self._dbs:
444                         cp_all.update(db.cp_all())
445                 return list(sorted(cp_all))
446
447         def _aux_get(self, *args, **kwargs):
448                 for db in self._dbs:
449                         try:
450                                 return db.aux_get(*args, **kwargs)
451                         except KeyError:
452                                 pass
453                 raise
454
455         def _findname(self, *args, **kwargs):
456                 for db in self._dbs:
457                         if db is not self._portdb:
458                                 # We don't want findname to return anything
459                                 # unless it's an ebuild in a portage tree.
460                                 # Otherwise, it's already built and we don't
461                                 # care about it.
462                                 continue
463                         func = getattr(db, "findname", None)
464                         if func:
465                                 value = func(*args, **kwargs)
466                                 if value:
467                                         return value
468                 return None
469
470         def _getFetchMap(self, *args, **kwargs):
471                 for db in self._dbs:
472                         func = getattr(db, "getFetchMap", None)
473                         if func:
474                                 value = func(*args, **kwargs)
475                                 if value:
476                                         return value
477                 return {}
478
479         def _visible(self, db, cpv, metadata):
480                 installed = db is self.vartree.dbapi
481                 built = installed or db is not self._portdb
482                 pkg_type = "ebuild"
483                 if installed:
484                         pkg_type = "installed"
485                 elif built:
486                         pkg_type = "binary"
487                 return visible(self.settings,
488                         Package(type_name=pkg_type, root_config=self.root_config,
489                         cpv=cpv, built=built, installed=installed, metadata=metadata))
490
491         def _xmatch(self, level, atom):
492                 """
493                 This method does not expand old-style virtuals because it
494                 is restricted to returning matches for a single ${CATEGORY}/${PN}
495                 and old-style virual matches unreliable for that when querying
496                 multiple package databases. If necessary, old-style virtuals
497                 can be performed on atoms prior to calling this method.
498                 """
499                 cp = portage.dep_getkey(atom)
500                 if level == "match-all":
501                         matches = set()
502                         for db in self._dbs:
503                                 if hasattr(db, "xmatch"):
504                                         matches.update(db.xmatch(level, atom))
505                                 else:
506                                         matches.update(db.match(atom))
507                         result = list(x for x in matches if portage.cpv_getkey(x) == cp)
508                         db._cpv_sort_ascending(result)
509                 elif level == "match-visible":
510                         matches = set()
511                         for db in self._dbs:
512                                 if hasattr(db, "xmatch"):
513                                         matches.update(db.xmatch(level, atom))
514                                 else:
515                                         db_keys = list(db._aux_cache_keys)
516                                         for cpv in db.match(atom):
517                                                 metadata = izip(db_keys,
518                                                         db.aux_get(cpv, db_keys))
519                                                 if not self._visible(db, cpv, metadata):
520                                                         continue
521                                                 matches.add(cpv)
522                         result = list(x for x in matches if portage.cpv_getkey(x) == cp)
523                         db._cpv_sort_ascending(result)
524                 elif level == "bestmatch-visible":
525                         result = None
526                         for db in self._dbs:
527                                 if hasattr(db, "xmatch"):
528                                         cpv = db.xmatch("bestmatch-visible", atom)
529                                         if not cpv or portage.cpv_getkey(cpv) != cp:
530                                                 continue
531                                         if not result or cpv == portage.best([cpv, result]):
532                                                 result = cpv
533                                 else:
534                                         db_keys = Package.metadata_keys
535                                         # break out of this loop with highest visible
536                                         # match, checked in descending order
537                                         for cpv in reversed(db.match(atom)):
538                                                 if portage.cpv_getkey(cpv) != cp:
539                                                         continue
540                                                 metadata = izip(db_keys,
541                                                         db.aux_get(cpv, db_keys))
542                                                 if not self._visible(db, cpv, metadata):
543                                                         continue
544                                                 if not result or cpv == portage.best([cpv, result]):
545                                                         result = cpv
546                                                 break
547                 else:
548                         raise NotImplementedError(level)
549                 return result
550
551         def execute(self,searchkey):
552                 """Performs the search for the supplied search key"""
553                 match_category = 0
554                 self.searchkey=searchkey
555                 self.packagematches = []
556                 if self.searchdesc:
557                         self.searchdesc=1
558                         self.matches = {"pkg":[], "desc":[], "set":[]}
559                 else:
560                         self.searchdesc=0
561                         self.matches = {"pkg":[], "set":[]}
562                 print "Searching...   ",
563
564                 regexsearch = False
565                 if self.searchkey.startswith('%'):
566                         regexsearch = True
567                         self.searchkey = self.searchkey[1:]
568                 if self.searchkey.startswith('@'):
569                         match_category = 1
570                         self.searchkey = self.searchkey[1:]
571                 if regexsearch:
572                         self.searchre=re.compile(self.searchkey,re.I)
573                 else:
574                         self.searchre=re.compile(re.escape(self.searchkey), re.I)
575                 for package in self.portdb.cp_all():
576                         self.spinner.update()
577
578                         if match_category:
579                                 match_string  = package[:]
580                         else:
581                                 match_string  = package.split("/")[-1]
582
583                         masked=0
584                         if self.searchre.search(match_string):
585                                 if not self.portdb.xmatch("match-visible", package):
586                                         masked=1
587                                 self.matches["pkg"].append([package,masked])
588                         elif self.searchdesc: # DESCRIPTION searching
589                                 full_package = self.portdb.xmatch("bestmatch-visible", package)
590                                 if not full_package:
591                                         #no match found; we don't want to query description
592                                         full_package = portage.best(
593                                                 self.portdb.xmatch("match-all", package))
594                                         if not full_package:
595                                                 continue
596                                         else:
597                                                 masked=1
598                                 try:
599                                         full_desc = self.portdb.aux_get(
600                                                 full_package, ["DESCRIPTION"])[0]
601                                 except KeyError:
602                                         print "emerge: search: aux_get() failed, skipping"
603                                         continue
604                                 if self.searchre.search(full_desc):
605                                         self.matches["desc"].append([full_package,masked])
606
607                 self.sdict = self.setconfig.getSets()
608                 for setname in self.sdict:
609                         self.spinner.update()
610                         if match_category:
611                                 match_string = setname
612                         else:
613                                 match_string = setname.split("/")[-1]
614                         
615                         if self.searchre.search(match_string):
616                                 self.matches["set"].append([setname, False])
617                         elif self.searchdesc:
618                                 if self.searchre.search(
619                                         self.sdict[setname].getMetadata("DESCRIPTION")):
620                                         self.matches["set"].append([setname, False])
621                         
622                 self.mlen=0
623                 for mtype in self.matches:
624                         self.matches[mtype].sort()
625                         self.mlen += len(self.matches[mtype])
626
627         def addCP(self, cp):
628                 if not self.portdb.xmatch("match-all", cp):
629                         return
630                 masked = 0
631                 if not self.portdb.xmatch("bestmatch-visible", cp):
632                         masked = 1
633                 self.matches["pkg"].append([cp, masked])
634                 self.mlen += 1
635
636         def output(self):
637                 """Outputs the results of the search."""
638                 print "\b\b  \n[ Results for search key : "+white(self.searchkey)+" ]"
639                 print "[ Applications found : "+white(str(self.mlen))+" ]"
640                 print " "
641                 vardb = self.vartree.dbapi
642                 for mtype in self.matches:
643                         for match,masked in self.matches[mtype]:
644                                 full_package = None
645                                 if mtype == "pkg":
646                                         catpack = match
647                                         full_package = self.portdb.xmatch(
648                                                 "bestmatch-visible", match)
649                                         if not full_package:
650                                                 #no match found; we don't want to query description
651                                                 masked=1
652                                                 full_package = portage.best(
653                                                         self.portdb.xmatch("match-all",match))
654                                 elif mtype == "desc":
655                                         full_package = match
656                                         match        = portage.cpv_getkey(match)
657                                 elif mtype == "set":
658                                         print green("*")+"  "+white(match)
659                                         print "     ", darkgreen("Description:")+"  ", self.sdict[match].getMetadata("DESCRIPTION")
660                                         print
661                                 if full_package:
662                                         try:
663                                                 desc, homepage, license = self.portdb.aux_get(
664                                                         full_package, ["DESCRIPTION","HOMEPAGE","LICENSE"])
665                                         except KeyError:
666                                                 print "emerge: search: aux_get() failed, skipping"
667                                                 continue
668                                         if masked:
669                                                 print green("*")+"  "+white(match)+" "+red("[ Masked ]")
670                                         else:
671                                                 print green("*")+"  "+white(match)
672                                         myversion = self.getVersion(full_package, search.VERSION_RELEASE)
673
674                                         mysum = [0,0]
675                                         file_size_str = None
676                                         mycat = match.split("/")[0]
677                                         mypkg = match.split("/")[1]
678                                         mycpv = match + "-" + myversion
679                                         myebuild = self.portdb.findname(mycpv)
680                                         if myebuild:
681                                                 pkgdir = os.path.dirname(myebuild)
682                                                 from portage import manifest
683                                                 mf = manifest.Manifest(
684                                                         pkgdir, self.settings["DISTDIR"])
685                                                 try:
686                                                         uri_map = self.portdb.getFetchMap(mycpv)
687                                                 except portage.exception.InvalidDependString, e:
688                                                         file_size_str = "Unknown (%s)" % (e,)
689                                                         del e
690                                                 else:
691                                                         try:
692                                                                 mysum[0] = mf.getDistfilesSize(uri_map)
693                                                         except KeyError, e:
694                                                                 file_size_str = "Unknown (missing " + \
695                                                                         "digest for %s)" % (e,)
696                                                                 del e
697
698                                         available = False
699                                         for db in self._dbs:
700                                                 if db is not vardb and \
701                                                         db.cpv_exists(mycpv):
702                                                         available = True
703                                                         if not myebuild and hasattr(db, "bintree"):
704                                                                 myebuild = db.bintree.getname(mycpv)
705                                                                 try:
706                                                                         mysum[0] = os.stat(myebuild).st_size
707                                                                 except OSError:
708                                                                         myebuild = None
709                                                         break
710
711                                         if myebuild and file_size_str is None:
712                                                 mystr = str(mysum[0] / 1024)
713                                                 mycount = len(mystr)
714                                                 while (mycount > 3):
715                                                         mycount -= 3
716                                                         mystr = mystr[:mycount] + "," + mystr[mycount:]
717                                                 file_size_str = mystr + " kB"
718
719                                         if self.verbose:
720                                                 if available:
721                                                         print "     ", darkgreen("Latest version available:"),myversion
722                                                 print "     ", self.getInstallationStatus(mycat+'/'+mypkg)
723                                                 if myebuild:
724                                                         print "      %s %s" % \
725                                                                 (darkgreen("Size of files:"), file_size_str)
726                                                 print "     ", darkgreen("Homepage:")+"     ",homepage
727                                                 print "     ", darkgreen("Description:")+"  ",desc
728                                                 print "     ", darkgreen("License:")+"      ",license
729                                                 print
730         #
731         # private interface
732         #
733         def getInstallationStatus(self,package):
734                 installed_package = self.vartree.dep_bestmatch(package)
735                 result = ""
736                 version = self.getVersion(installed_package,search.VERSION_RELEASE)
737                 if len(version) > 0:
738                         result = darkgreen("Latest version installed:")+" "+version
739                 else:
740                         result = darkgreen("Latest version installed:")+" [ Not Installed ]"
741                 return result
742
743         def getVersion(self,full_package,detail):
744                 if len(full_package) > 1:
745                         package_parts = portage.catpkgsplit(full_package)
746                         if detail == search.VERSION_RELEASE and package_parts[3] != 'r0':
747                                 result = package_parts[2]+ "-" + package_parts[3]
748                         else:
749                                 result = package_parts[2]
750                 else:
751                         result = ""
752                 return result
753
754 class RootConfig(object):
755         """This is used internally by depgraph to track information about a
756         particular $ROOT."""
757
758         pkg_tree_map = {
759                 "ebuild"    : "porttree",
760                 "binary"    : "bintree",
761                 "installed" : "vartree"
762         }
763
764         tree_pkg_map = {}
765         for k, v in pkg_tree_map.iteritems():
766                 tree_pkg_map[v] = k
767
768         def __init__(self, settings, trees, setconfig):
769                 self.trees = trees
770                 self.settings = settings
771                 self.iuse_implicit = tuple(sorted(settings._get_implicit_iuse()))
772                 self.root = self.settings["ROOT"]
773                 self.setconfig = setconfig
774                 if setconfig is None:
775                         self.sets = {}
776                 else:
777                         self.sets = self.setconfig.getSets()
778                 self.visible_pkgs = PackageVirtualDbapi(self.settings)
779
780 def create_world_atom(pkg, args_set, root_config):
781         """Create a new atom for the world file if one does not exist.  If the
782         argument atom is precise enough to identify a specific slot then a slot
783         atom will be returned. Atoms that are in the system set may also be stored
784         in world since system atoms can only match one slot while world atoms can
785         be greedy with respect to slots.  Unslotted system packages will not be
786         stored in world."""
787
788         arg_atom = args_set.findAtomForPackage(pkg)
789         if not arg_atom:
790                 return None
791         cp = portage.dep_getkey(arg_atom)
792         new_world_atom = cp
793         sets = root_config.sets
794         portdb = root_config.trees["porttree"].dbapi
795         vardb = root_config.trees["vartree"].dbapi
796         available_slots = set(portdb.aux_get(cpv, ["SLOT"])[0] \
797                 for cpv in portdb.match(cp))
798         slotted = len(available_slots) > 1 or \
799                 (len(available_slots) == 1 and "0" not in available_slots)
800         if not slotted:
801                 # check the vdb in case this is multislot
802                 available_slots = set(vardb.aux_get(cpv, ["SLOT"])[0] \
803                         for cpv in vardb.match(cp))
804                 slotted = len(available_slots) > 1 or \
805                         (len(available_slots) == 1 and "0" not in available_slots)
806         if slotted and arg_atom != cp:
807                 # If the user gave a specific atom, store it as a
808                 # slot atom in the world file.
809                 slot_atom = pkg.slot_atom
810
811                 # For USE=multislot, there are a couple of cases to
812                 # handle here:
813                 #
814                 # 1) SLOT="0", but the real SLOT spontaneously changed to some
815                 #    unknown value, so just record an unslotted atom.
816                 #
817                 # 2) SLOT comes from an installed package and there is no
818                 #    matching SLOT in the portage tree.
819                 #
820                 # Make sure that the slot atom is available in either the
821                 # portdb or the vardb, since otherwise the user certainly
822                 # doesn't want the SLOT atom recorded in the world file
823                 # (case 1 above).  If it's only available in the vardb,
824                 # the user may be trying to prevent a USE=multislot
825                 # package from being removed by --depclean (case 2 above).
826
827                 mydb = portdb
828                 if not portdb.match(slot_atom):
829                         # SLOT seems to come from an installed multislot package
830                         mydb = vardb
831                 # If there is no installed package matching the SLOT atom,
832                 # it probably changed SLOT spontaneously due to USE=multislot,
833                 # so just record an unslotted atom.
834                 if vardb.match(slot_atom):
835                         # Now verify that the argument is precise
836                         # enough to identify a specific slot.
837                         matches = mydb.match(arg_atom)
838                         matched_slots = set()
839                         for cpv in matches:
840                                 matched_slots.add(mydb.aux_get(cpv, ["SLOT"])[0])
841                         if len(matched_slots) == 1:
842                                 new_world_atom = slot_atom
843
844         if new_world_atom == sets["world"].findAtomForPackage(pkg):
845                 # Both atoms would be identical, so there's nothing to add.
846                 return None
847         if not slotted:
848                 # Unlike world atoms, system atoms are not greedy for slots, so they
849                 # can't be safely excluded from world if they are slotted.
850                 system_atom = sets["system"].findAtomForPackage(pkg)
851                 if system_atom:
852                         if not portage.dep_getkey(system_atom).startswith("virtual/"):
853                                 return None
854                         # System virtuals aren't safe to exclude from world since they can
855                         # match multiple old-style virtuals but only one of them will be
856                         # pulled in by update or depclean.
857                         providers = portdb.mysettings.getvirtuals().get(
858                                 portage.dep_getkey(system_atom))
859                         if providers and len(providers) == 1 and providers[0] == cp:
860                                 return None
861         return new_world_atom
862
863 def filter_iuse_defaults(iuse):
864         for flag in iuse:
865                 if flag.startswith("+") or flag.startswith("-"):
866                         yield flag[1:]
867                 else:
868                         yield flag
869
870 class SlotObject(object):
871         __slots__ = ("__weakref__",)
872
873         def __init__(self, **kwargs):
874                 classes = [self.__class__]
875                 while classes:
876                         c = classes.pop()
877                         if c is SlotObject:
878                                 continue
879                         classes.extend(c.__bases__)
880                         slots = getattr(c, "__slots__", None)
881                         if not slots:
882                                 continue
883                         for myattr in slots:
884                                 myvalue = kwargs.get(myattr, None)
885                                 setattr(self, myattr, myvalue)
886
887         def copy(self):
888                 """
889                 Create a new instance and copy all attributes
890                 defined from __slots__ (including those from
891                 inherited classes).
892                 """
893                 obj = self.__class__()
894
895                 classes = [self.__class__]
896                 while classes:
897                         c = classes.pop()
898                         if c is SlotObject:
899                                 continue
900                         classes.extend(c.__bases__)
901                         slots = getattr(c, "__slots__", None)
902                         if not slots:
903                                 continue
904                         for myattr in slots:
905                                 setattr(obj, myattr, getattr(self, myattr))
906
907                 return obj
908
909 class AbstractDepPriority(SlotObject):
910         __slots__ = ("buildtime", "runtime", "runtime_post")
911
912         def __lt__(self, other):
913                 return self.__int__() < other
914
915         def __le__(self, other):
916                 return self.__int__() <= other
917
918         def __eq__(self, other):
919                 return self.__int__() == other
920
921         def __ne__(self, other):
922                 return self.__int__() != other
923
924         def __gt__(self, other):
925                 return self.__int__() > other
926
927         def __ge__(self, other):
928                 return self.__int__() >= other
929
930         def copy(self):
931                 import copy
932                 return copy.copy(self)
933
934 class DepPriority(AbstractDepPriority):
935
936         __slots__ = ("satisfied", "optional", "rebuild")
937
938         def __int__(self):
939                 """
940                 Note: These priorities are only used for measuring hardness
941                 in the circular dependency display via digraph.debug_print(),
942                 and nothing more. For actual merge order calculations, the
943                 measures defined by the DepPriorityNormalRange and
944                 DepPrioritySatisfiedRange classes are used.
945
946                 Attributes                            Hardness
947
948                 not satisfied and buildtime            8
949                 not satisfied and runtime              7
950                 not satisfied and runtime_post         6
951                 satisfied and buildtime and rebuild    5
952                 satisfied and buildtime                4
953                 satisfied and runtime                  3
954                 satisfied and runtime_post             2
955                 optional                               1
956                 (none of the above)                    0
957
958                 """
959                 if not self.satisfied:
960                         if self.buildtime:
961                                 return 8
962                         if self.runtime:
963                                 return 7
964                         if self.runtime_post:
965                                 return 6
966                 if self.buildtime:
967                         if self.rebuild:
968                                 return 5
969                         return 4
970                 if self.runtime:
971                         return 3
972                 if self.runtime_post:
973                         return 2
974                 if self.optional:
975                         return 1
976                 return 0
977
978         def __str__(self):
979                 if self.optional:
980                         return "optional"
981                 if self.buildtime:
982                         return "buildtime"
983                 if self.runtime:
984                         return "runtime"
985                 if self.runtime_post:
986                         return "runtime_post"
987                 return "soft"
988
989 class BlockerDepPriority(DepPriority):
990         __slots__ = ()
991         def __int__(self):
992                 return 0
993
994         def __str__(self):
995                 return 'blocker'
996
997 BlockerDepPriority.instance = BlockerDepPriority()
998
999 class UnmergeDepPriority(AbstractDepPriority):
1000         __slots__ = ("optional", "satisfied",)
1001         """
1002         Combination of properties           Priority  Category
1003
1004         runtime                                0       HARD
1005         runtime_post                          -1       HARD
1006         buildtime                             -2       SOFT
1007         (none of the above)                   -2       SOFT
1008         """
1009
1010         MAX    =  0
1011         SOFT   = -2
1012         MIN    = -2
1013
1014         def __int__(self):
1015                 if self.runtime:
1016                         return 0
1017                 if self.runtime_post:
1018                         return -1
1019                 if self.buildtime:
1020                         return -2
1021                 return -2
1022
1023         def __str__(self):
1024                 myvalue = self.__int__()
1025                 if myvalue > self.SOFT:
1026                         return "hard"
1027                 return "soft"
1028
1029 class DepPriorityNormalRange(object):
1030         """
1031         DepPriority properties              Index      Category
1032
1033         buildtime                                      HARD
1034         runtime                                3       MEDIUM
1035         runtime_post                           2       MEDIUM_SOFT
1036         optional                               1       SOFT
1037         (none of the above)                    0       NONE
1038         """
1039         MEDIUM      = 3
1040         MEDIUM_SOFT = 2
1041         SOFT        = 1
1042         NONE        = 0
1043
1044         @classmethod
1045         def _ignore_optional(cls, priority):
1046                 if priority.__class__ is not DepPriority:
1047                         return False
1048                 return bool(priority.optional)
1049
1050         @classmethod
1051         def _ignore_runtime_post(cls, priority):
1052                 if priority.__class__ is not DepPriority:
1053                         return False
1054                 return bool(priority.optional or priority.runtime_post)
1055
1056         @classmethod
1057         def _ignore_runtime(cls, priority):
1058                 if priority.__class__ is not DepPriority:
1059                         return False
1060                 return not priority.buildtime
1061
1062         ignore_medium      = _ignore_runtime
1063         ignore_medium_soft = _ignore_runtime_post
1064         ignore_soft        = _ignore_optional
1065
1066 DepPriorityNormalRange.ignore_priority = (
1067         None,
1068         DepPriorityNormalRange._ignore_optional,
1069         DepPriorityNormalRange._ignore_runtime_post,
1070         DepPriorityNormalRange._ignore_runtime
1071 )
1072
1073 class DepPrioritySatisfiedRange(object):
1074         """
1075         DepPriority                         Index      Category
1076
1077         not satisfied and buildtime                    HARD
1078         not satisfied and runtime              7       MEDIUM
1079         not satisfied and runtime_post         6       MEDIUM_SOFT
1080         satisfied and buildtime and rebuild    5       SOFT
1081         satisfied and buildtime                4       SOFT
1082         satisfied and runtime                  3       SOFT
1083         satisfied and runtime_post             2       SOFT
1084         optional                               1       SOFT
1085         (none of the above)                    0       NONE
1086         """
1087         MEDIUM      = 7
1088         MEDIUM_SOFT = 6
1089         SOFT        = 5
1090         NONE        = 0
1091
1092         @classmethod
1093         def _ignore_optional(cls, priority):
1094                 if priority.__class__ is not DepPriority:
1095                         return False
1096                 return bool(priority.optional)
1097
1098         @classmethod
1099         def _ignore_satisfied_runtime_post(cls, priority):
1100                 if priority.__class__ is not DepPriority:
1101                         return False
1102                 if priority.optional:
1103                         return True
1104                 if not priority.satisfied:
1105                         return False
1106                 return bool(priority.runtime_post)
1107
1108         @classmethod
1109         def _ignore_satisfied_runtime(cls, priority):
1110                 if priority.__class__ is not DepPriority:
1111                         return False
1112                 if priority.optional:
1113                         return True
1114                 if not priority.satisfied:
1115                         return False
1116                 return not priority.buildtime
1117
1118         @classmethod
1119         def _ignore_satisfied_buildtime(cls, priority):
1120                 if priority.__class__ is not DepPriority:
1121                         return False
1122                 if priority.optional:
1123                         return True
1124                 if not priority.satisfied:
1125                         return False
1126                 if priority.buildtime:
1127                         return not priority.rebuild
1128                 return True
1129
1130         @classmethod
1131         def _ignore_satisfied_buildtime_rebuild(cls, priority):
1132                 if priority.__class__ is not DepPriority:
1133                         return False
1134                 if priority.optional:
1135                         return True
1136                 return bool(priority.satisfied)
1137
1138         @classmethod
1139         def _ignore_runtime_post(cls, priority):
1140                 if priority.__class__ is not DepPriority:
1141                         return False
1142                 return bool(priority.optional or \
1143                         priority.satisfied or \
1144                         priority.runtime_post)
1145
1146         @classmethod
1147         def _ignore_runtime(cls, priority):
1148                 if priority.__class__ is not DepPriority:
1149                         return False
1150                 return bool(priority.satisfied or \
1151                         not priority.buildtime)
1152
1153         ignore_medium      = _ignore_runtime
1154         ignore_medium_soft = _ignore_runtime_post
1155         ignore_soft        = _ignore_satisfied_buildtime_rebuild
1156
1157 DepPrioritySatisfiedRange.ignore_priority = (
1158         None,
1159         DepPrioritySatisfiedRange._ignore_optional,
1160         DepPrioritySatisfiedRange._ignore_satisfied_runtime_post,
1161         DepPrioritySatisfiedRange._ignore_satisfied_runtime,
1162         DepPrioritySatisfiedRange._ignore_satisfied_buildtime,
1163         DepPrioritySatisfiedRange._ignore_satisfied_buildtime_rebuild,
1164         DepPrioritySatisfiedRange._ignore_runtime_post,
1165         DepPrioritySatisfiedRange._ignore_runtime
1166 )
1167
1168 def _find_deep_system_runtime_deps(graph):
1169         deep_system_deps = set()
1170         node_stack = []
1171         for node in graph:
1172                 if not isinstance(node, Package) or \
1173                         node.operation == 'uninstall':
1174                         continue
1175                 if node.root_config.sets['system'].findAtomForPackage(node):
1176                         node_stack.append(node)
1177
1178         def ignore_priority(priority):
1179                 """
1180                 Ignore non-runtime priorities.
1181                 """
1182                 if isinstance(priority, DepPriority) and \
1183                         (priority.runtime or priority.runtime_post):
1184                         return False
1185                 return True
1186
1187         while node_stack:
1188                 node = node_stack.pop()
1189                 if node in deep_system_deps:
1190                         continue
1191                 deep_system_deps.add(node)
1192                 for child in graph.child_nodes(node, ignore_priority=ignore_priority):
1193                         if not isinstance(child, Package) or \
1194                                 child.operation == 'uninstall':
1195                                 continue
1196                         node_stack.append(child)
1197
1198         return deep_system_deps
1199
1200 class FakeVartree(portage.vartree):
1201         """This is implements an in-memory copy of a vartree instance that provides
1202         all the interfaces required for use by the depgraph.  The vardb is locked
1203         during the constructor call just long enough to read a copy of the
1204         installed package information.  This allows the depgraph to do it's
1205         dependency calculations without holding a lock on the vardb.  It also
1206         allows things like vardb global updates to be done in memory so that the
1207         user doesn't necessarily need write access to the vardb in cases where
1208         global updates are necessary (updates are performed when necessary if there
1209         is not a matching ebuild in the tree)."""
1210         def __init__(self, root_config, pkg_cache=None, acquire_lock=1):
1211                 self._root_config = root_config
1212                 if pkg_cache is None:
1213                         pkg_cache = {}
1214                 real_vartree = root_config.trees["vartree"]
1215                 portdb = root_config.trees["porttree"].dbapi
1216                 self.root = real_vartree.root
1217                 self.settings = real_vartree.settings
1218                 mykeys = list(real_vartree.dbapi._aux_cache_keys)
1219                 if "_mtime_" not in mykeys:
1220                         mykeys.append("_mtime_")
1221                 self._db_keys = mykeys
1222                 self._pkg_cache = pkg_cache
1223                 self.dbapi = PackageVirtualDbapi(real_vartree.settings)
1224                 vdb_path = os.path.join(self.root, portage.VDB_PATH)
1225                 try:
1226                         # At least the parent needs to exist for the lock file.
1227                         portage.util.ensure_dirs(vdb_path)
1228                 except portage.exception.PortageException:
1229                         pass
1230                 vdb_lock = None
1231                 try:
1232                         if acquire_lock and os.access(vdb_path, os.W_OK):
1233                                 vdb_lock = portage.locks.lockdir(vdb_path)
1234                         real_dbapi = real_vartree.dbapi
1235                         slot_counters = {}
1236                         for cpv in real_dbapi.cpv_all():
1237                                 cache_key = ("installed", self.root, cpv, "nomerge")
1238                                 pkg = self._pkg_cache.get(cache_key)
1239                                 if pkg is not None:
1240                                         metadata = pkg.metadata
1241                                 else:
1242                                         metadata = dict(izip(mykeys, real_dbapi.aux_get(cpv, mykeys)))
1243                                 myslot = metadata["SLOT"]
1244                                 mycp = portage.dep_getkey(cpv)
1245                                 myslot_atom = "%s:%s" % (mycp, myslot)
1246                                 try:
1247                                         mycounter = long(metadata["COUNTER"])
1248                                 except ValueError:
1249                                         mycounter = 0
1250                                         metadata["COUNTER"] = str(mycounter)
1251                                 other_counter = slot_counters.get(myslot_atom, None)
1252                                 if other_counter is not None:
1253                                         if other_counter > mycounter:
1254                                                 continue
1255                                 slot_counters[myslot_atom] = mycounter
1256                                 if pkg is None:
1257                                         pkg = Package(built=True, cpv=cpv,
1258                                                 installed=True, metadata=metadata,
1259                                                 root_config=root_config, type_name="installed")
1260                                 self._pkg_cache[pkg] = pkg
1261                                 self.dbapi.cpv_inject(pkg)
1262                         real_dbapi.flush_cache()
1263                 finally:
1264                         if vdb_lock:
1265                                 portage.locks.unlockdir(vdb_lock)
1266                 # Populate the old-style virtuals using the cached values.
1267                 if not self.settings.treeVirtuals:
1268                         self.settings.treeVirtuals = portage.util.map_dictlist_vals(
1269                                 portage.getCPFromCPV, self.get_all_provides())
1270
1271                 # Intialize variables needed for lazy cache pulls of the live ebuild
1272                 # metadata.  This ensures that the vardb lock is released ASAP, without
1273                 # being delayed in case cache generation is triggered.
1274                 self._aux_get = self.dbapi.aux_get
1275                 self.dbapi.aux_get = self._aux_get_wrapper
1276                 self._match = self.dbapi.match
1277                 self.dbapi.match = self._match_wrapper
1278                 self._aux_get_history = set()
1279                 self._portdb_keys = ["EAPI", "DEPEND", "RDEPEND", "PDEPEND"]
1280                 self._portdb = portdb
1281                 self._global_updates = None
1282
1283         def _match_wrapper(self, cpv, use_cache=1):
1284                 """
1285                 Make sure the metadata in Package instances gets updated for any
1286                 cpv that is returned from a match() call, since the metadata can
1287                 be accessed directly from the Package instance instead of via
1288                 aux_get().
1289                 """
1290                 matches = self._match(cpv, use_cache=use_cache)
1291                 for cpv in matches:
1292                         if cpv in self._aux_get_history:
1293                                 continue
1294                         self._aux_get_wrapper(cpv, [])
1295                 return matches
1296
1297         def _aux_get_wrapper(self, pkg, wants):
1298                 if pkg in self._aux_get_history:
1299                         return self._aux_get(pkg, wants)
1300                 self._aux_get_history.add(pkg)
1301                 try:
1302                         # Use the live ebuild metadata if possible.
1303                         live_metadata = dict(izip(self._portdb_keys,
1304                                 self._portdb.aux_get(pkg, self._portdb_keys)))
1305                         if not portage.eapi_is_supported(live_metadata["EAPI"]):
1306                                 raise KeyError(pkg)
1307                         self.dbapi.aux_update(pkg, live_metadata)
1308                 except (KeyError, portage.exception.PortageException):
1309                         if self._global_updates is None:
1310                                 self._global_updates = \
1311                                         grab_global_updates(self._portdb.porttree_root)
1312                         perform_global_updates(
1313                                 pkg, self.dbapi, self._global_updates)
1314                 return self._aux_get(pkg, wants)
1315
1316         def sync(self, acquire_lock=1):
1317                 """
1318                 Call this method to synchronize state with the real vardb
1319                 after one or more packages may have been installed or
1320                 uninstalled.
1321                 """
1322                 vdb_path = os.path.join(self.root, portage.VDB_PATH)
1323                 try:
1324                         # At least the parent needs to exist for the lock file.
1325                         portage.util.ensure_dirs(vdb_path)
1326                 except portage.exception.PortageException:
1327                         pass
1328                 vdb_lock = None
1329                 try:
1330                         if acquire_lock and os.access(vdb_path, os.W_OK):
1331                                 vdb_lock = portage.locks.lockdir(vdb_path)
1332                         self._sync()
1333                 finally:
1334                         if vdb_lock:
1335                                 portage.locks.unlockdir(vdb_lock)
1336
1337         def _sync(self):
1338
1339                 real_vardb = self._root_config.trees["vartree"].dbapi
1340                 current_cpv_set = frozenset(real_vardb.cpv_all())
1341                 pkg_vardb = self.dbapi
1342                 aux_get_history = self._aux_get_history
1343
1344                 # Remove any packages that have been uninstalled.
1345                 for pkg in list(pkg_vardb):
1346                         if pkg.cpv not in current_cpv_set:
1347                                 pkg_vardb.cpv_remove(pkg)
1348                                 aux_get_history.discard(pkg.cpv)
1349
1350                 # Validate counters and timestamps.
1351                 slot_counters = {}
1352                 root = self.root
1353                 validation_keys = ["COUNTER", "_mtime_"]
1354                 for cpv in current_cpv_set:
1355
1356                         pkg_hash_key = ("installed", root, cpv, "nomerge")
1357                         pkg = pkg_vardb.get(pkg_hash_key)
1358                         if pkg is not None:
1359                                 counter, mtime = real_vardb.aux_get(cpv, validation_keys)
1360                                 try:
1361                                         counter = long(counter)
1362                                 except ValueError:
1363                                         counter = 0
1364
1365                                 if counter != pkg.counter or \
1366                                         mtime != pkg.mtime:
1367                                         pkg_vardb.cpv_remove(pkg)
1368                                         aux_get_history.discard(pkg.cpv)
1369                                         pkg = None
1370
1371                         if pkg is None:
1372                                 pkg = self._pkg(cpv)
1373
1374                         other_counter = slot_counters.get(pkg.slot_atom)
1375                         if other_counter is not None:
1376                                 if other_counter > pkg.counter:
1377                                         continue
1378
1379                         slot_counters[pkg.slot_atom] = pkg.counter
1380                         pkg_vardb.cpv_inject(pkg)
1381
1382                 real_vardb.flush_cache()
1383
1384         def _pkg(self, cpv):
1385                 root_config = self._root_config
1386                 real_vardb = root_config.trees["vartree"].dbapi
1387                 pkg = Package(cpv=cpv, installed=True,
1388                         metadata=izip(self._db_keys,
1389                         real_vardb.aux_get(cpv, self._db_keys)),
1390                         root_config=root_config,
1391                         type_name="installed")
1392
1393                 try:
1394                         mycounter = long(pkg.metadata["COUNTER"])
1395                 except ValueError:
1396                         mycounter = 0
1397                         pkg.metadata["COUNTER"] = str(mycounter)
1398
1399                 return pkg
1400
1401 def grab_global_updates(portdir):
1402         from portage.update import grab_updates, parse_updates
1403         updpath = os.path.join(portdir, "profiles", "updates")
1404         try:
1405                 rawupdates = grab_updates(updpath)
1406         except portage.exception.DirectoryNotFound:
1407                 rawupdates = []
1408         upd_commands = []
1409         for mykey, mystat, mycontent in rawupdates:
1410                 commands, errors = parse_updates(mycontent)
1411                 upd_commands.extend(commands)
1412         return upd_commands
1413
1414 def perform_global_updates(mycpv, mydb, mycommands):
1415         from portage.update import update_dbentries
1416         aux_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
1417         aux_dict = dict(izip(aux_keys, mydb.aux_get(mycpv, aux_keys)))
1418         updates = update_dbentries(mycommands, aux_dict)
1419         if updates:
1420                 mydb.aux_update(mycpv, updates)
1421
1422 def visible(pkgsettings, pkg):
1423         """
1424         Check if a package is visible. This can raise an InvalidDependString
1425         exception if LICENSE is invalid.
1426         TODO: optionally generate a list of masking reasons
1427         @rtype: Boolean
1428         @returns: True if the package is visible, False otherwise.
1429         """
1430         if not pkg.metadata["SLOT"]:
1431                 return False
1432         if not pkg.installed:
1433                 if not pkgsettings._accept_chost(pkg.cpv, pkg.metadata):
1434                         return False
1435         eapi = pkg.metadata["EAPI"]
1436         if not portage.eapi_is_supported(eapi):
1437                 return False
1438         if not pkg.installed:
1439                 if portage._eapi_is_deprecated(eapi):
1440                         return False
1441                 if pkgsettings._getMissingKeywords(pkg.cpv, pkg.metadata):
1442                         return False
1443         if pkgsettings._getMaskAtom(pkg.cpv, pkg.metadata):
1444                 return False
1445         if pkgsettings._getProfileMaskAtom(pkg.cpv, pkg.metadata):
1446                 return False
1447         try:
1448                 if pkgsettings._getMissingLicenses(pkg.cpv, pkg.metadata):
1449                         return False
1450         except portage.exception.InvalidDependString:
1451                 return False
1452         return True
1453
1454 def get_masking_status(pkg, pkgsettings, root_config):
1455
1456         mreasons = portage.getmaskingstatus(
1457                 pkg, settings=pkgsettings,
1458                 portdb=root_config.trees["porttree"].dbapi)
1459
1460         if not pkg.installed:
1461                 if not pkgsettings._accept_chost(pkg.cpv, pkg.metadata):
1462                         mreasons.append("CHOST: %s" % \
1463                                 pkg.metadata["CHOST"])
1464
1465         if not pkg.metadata["SLOT"]:
1466                 mreasons.append("invalid: SLOT is undefined")
1467
1468         return mreasons
1469
1470 def get_mask_info(root_config, cpv, pkgsettings,
1471         db, pkg_type, built, installed, db_keys):
1472         eapi_masked = False
1473         try:
1474                 metadata = dict(izip(db_keys,
1475                         db.aux_get(cpv, db_keys)))
1476         except KeyError:
1477                 metadata = None
1478         if metadata and not built:
1479                 pkgsettings.setcpv(cpv, mydb=metadata)
1480                 metadata["USE"] = pkgsettings["PORTAGE_USE"]
1481                 metadata['CHOST'] = pkgsettings.get('CHOST', '')
1482         if metadata is None:
1483                 mreasons = ["corruption"]
1484         else:
1485                 eapi = metadata['EAPI']
1486                 if eapi[:1] == '-':
1487                         eapi = eapi[1:]
1488                 if not portage.eapi_is_supported(eapi):
1489                         mreasons = ['EAPI %s' % eapi]
1490                 else:
1491                         pkg = Package(type_name=pkg_type, root_config=root_config,
1492                                 cpv=cpv, built=built, installed=installed, metadata=metadata)
1493                         mreasons = get_masking_status(pkg, pkgsettings, root_config)
1494         return metadata, mreasons
1495
1496 def show_masked_packages(masked_packages):
1497         shown_licenses = set()
1498         shown_comments = set()
1499         # Maybe there is both an ebuild and a binary. Only
1500         # show one of them to avoid redundant appearance.
1501         shown_cpvs = set()
1502         have_eapi_mask = False
1503         for (root_config, pkgsettings, cpv,
1504                 metadata, mreasons) in masked_packages:
1505                 if cpv in shown_cpvs:
1506                         continue
1507                 shown_cpvs.add(cpv)
1508                 comment, filename = None, None
1509                 if "package.mask" in mreasons:
1510                         comment, filename = \
1511                                 portage.getmaskingreason(
1512                                 cpv, metadata=metadata,
1513                                 settings=pkgsettings,
1514                                 portdb=root_config.trees["porttree"].dbapi,
1515                                 return_location=True)
1516                 missing_licenses = []
1517                 if metadata:
1518                         if not portage.eapi_is_supported(metadata["EAPI"]):
1519                                 have_eapi_mask = True
1520                         try:
1521                                 missing_licenses = \
1522                                         pkgsettings._getMissingLicenses(
1523                                                 cpv, metadata)
1524                         except portage.exception.InvalidDependString:
1525                                 # This will have already been reported
1526                                 # above via mreasons.
1527                                 pass
1528
1529                 print "- "+cpv+" (masked by: "+", ".join(mreasons)+")"
1530                 if comment and comment not in shown_comments:
1531                         print filename+":"
1532                         print comment
1533                         shown_comments.add(comment)
1534                 portdb = root_config.trees["porttree"].dbapi
1535                 for l in missing_licenses:
1536                         l_path = portdb.findLicensePath(l)
1537                         if l in shown_licenses:
1538                                 continue
1539                         msg = ("A copy of the '%s' license" + \
1540                         " is located at '%s'.") % (l, l_path)
1541                         print msg
1542                         print
1543                         shown_licenses.add(l)
1544         return have_eapi_mask
1545
1546 class Task(SlotObject):
1547         __slots__ = ("_hash_key", "_hash_value")
1548
1549         def _get_hash_key(self):
1550                 hash_key = getattr(self, "_hash_key", None)
1551                 if hash_key is None:
1552                         raise NotImplementedError(self)
1553                 return hash_key
1554
1555         def __eq__(self, other):
1556                 return self._get_hash_key() == other
1557
1558         def __ne__(self, other):
1559                 return self._get_hash_key() != other
1560
1561         def __hash__(self):
1562                 hash_value = getattr(self, "_hash_value", None)
1563                 if hash_value is None:
1564                         self._hash_value = hash(self._get_hash_key())
1565                 return self._hash_value
1566
1567         def __len__(self):
1568                 return len(self._get_hash_key())
1569
1570         def __getitem__(self, key):
1571                 return self._get_hash_key()[key]
1572
1573         def __iter__(self):
1574                 return iter(self._get_hash_key())
1575
1576         def __contains__(self, key):
1577                 return key in self._get_hash_key()
1578
1579         def __str__(self):
1580                 return str(self._get_hash_key())
1581
1582 class Blocker(Task):
1583
1584         __hash__ = Task.__hash__
1585         __slots__ = ("root", "atom", "cp", "eapi", "satisfied")
1586
1587         def __init__(self, **kwargs):
1588                 Task.__init__(self, **kwargs)
1589                 self.cp = portage.dep_getkey(self.atom)
1590
1591         def _get_hash_key(self):
1592                 hash_key = getattr(self, "_hash_key", None)
1593                 if hash_key is None:
1594                         self._hash_key = \
1595                                 ("blocks", self.root, self.atom, self.eapi)
1596                 return self._hash_key
1597
1598 class Package(Task):
1599
1600         __hash__ = Task.__hash__
1601         __slots__ = ("built", "cpv", "depth",
1602                 "installed", "metadata", "onlydeps", "operation",
1603                 "root_config", "type_name",
1604                 "category", "counter", "cp", "cpv_split",
1605                 "inherited", "iuse", "mtime",
1606                 "pf", "pv_split", "root", "slot", "slot_atom", "use")
1607
1608         metadata_keys = [
1609                 "CHOST", "COUNTER", "DEPEND", "EAPI",
1610                 "INHERITED", "IUSE", "KEYWORDS",
1611                 "LICENSE", "PDEPEND", "PROVIDE", "RDEPEND",
1612                 "repository", "PROPERTIES", "RESTRICT", "SLOT", "USE", "_mtime_"]
1613
1614         def __init__(self, **kwargs):
1615                 Task.__init__(self, **kwargs)
1616                 self.root = self.root_config.root
1617                 self.metadata = _PackageMetadataWrapper(self, self.metadata)
1618                 self.cp = portage.cpv_getkey(self.cpv)
1619                 slot = self.slot
1620                 if not slot:
1621                         # Avoid an InvalidAtom exception when creating slot_atom.
1622                         # This package instance will be masked due to empty SLOT.
1623                         slot = '0'
1624                 self.slot_atom = portage.dep.Atom("%s:%s" % (self.cp, slot))
1625                 self.category, self.pf = portage.catsplit(self.cpv)
1626                 self.cpv_split = portage.catpkgsplit(self.cpv)
1627                 self.pv_split = self.cpv_split[1:]
1628
1629         class _use(object):
1630
1631                 __slots__ = ("__weakref__", "enabled")
1632
1633                 def __init__(self, use):
1634                         self.enabled = frozenset(use)
1635
1636         class _iuse(object):
1637
1638                 __slots__ = ("__weakref__", "all", "enabled", "disabled", "iuse_implicit", "regex", "tokens")
1639
1640                 def __init__(self, tokens, iuse_implicit):
1641                         self.tokens = tuple(tokens)
1642                         self.iuse_implicit = iuse_implicit
1643                         enabled = []
1644                         disabled = []
1645                         other = []
1646                         for x in tokens:
1647                                 prefix = x[:1]
1648                                 if prefix == "+":
1649                                         enabled.append(x[1:])
1650                                 elif prefix == "-":
1651                                         disabled.append(x[1:])
1652                                 else:
1653                                         other.append(x)
1654                         self.enabled = frozenset(enabled)
1655                         self.disabled = frozenset(disabled)
1656                         self.all = frozenset(chain(enabled, disabled, other))
1657
1658                 def __getattribute__(self, name):
1659                         if name == "regex":
1660                                 try:
1661                                         return object.__getattribute__(self, "regex")
1662                                 except AttributeError:
1663                                         all = object.__getattribute__(self, "all")
1664                                         iuse_implicit = object.__getattribute__(self, "iuse_implicit")
1665                                         # Escape anything except ".*" which is supposed
1666                                         # to pass through from _get_implicit_iuse()
1667                                         regex = (re.escape(x) for x in chain(all, iuse_implicit))
1668                                         regex = "^(%s)$" % "|".join(regex)
1669                                         regex = regex.replace("\\.\\*", ".*")
1670                                         self.regex = re.compile(regex)
1671                         return object.__getattribute__(self, name)
1672
1673         def _get_hash_key(self):
1674                 hash_key = getattr(self, "_hash_key", None)
1675                 if hash_key is None:
1676                         if self.operation is None:
1677                                 self.operation = "merge"
1678                                 if self.onlydeps or self.installed:
1679                                         self.operation = "nomerge"
1680                         self._hash_key = \
1681                                 (self.type_name, self.root, self.cpv, self.operation)
1682                 return self._hash_key
1683
1684         def __lt__(self, other):
1685                 if other.cp != self.cp:
1686                         return False
1687                 if portage.pkgcmp(self.pv_split, other.pv_split) < 0:
1688                         return True
1689                 return False
1690
1691         def __le__(self, other):
1692                 if other.cp != self.cp:
1693                         return False
1694                 if portage.pkgcmp(self.pv_split, other.pv_split) <= 0:
1695                         return True
1696                 return False
1697
1698         def __gt__(self, other):
1699                 if other.cp != self.cp:
1700                         return False
1701                 if portage.pkgcmp(self.pv_split, other.pv_split) > 0:
1702                         return True
1703                 return False
1704
1705         def __ge__(self, other):
1706                 if other.cp != self.cp:
1707                         return False
1708                 if portage.pkgcmp(self.pv_split, other.pv_split) >= 0:
1709                         return True
1710                 return False
1711
1712 _all_metadata_keys = set(x for x in portage.auxdbkeys \
1713         if not x.startswith("UNUSED_"))
1714 _all_metadata_keys.discard("CDEPEND")
1715 _all_metadata_keys.update(Package.metadata_keys)
1716
1717 from portage.cache.mappings import slot_dict_class
1718 _PackageMetadataWrapperBase = slot_dict_class(_all_metadata_keys)
1719
1720 class _PackageMetadataWrapper(_PackageMetadataWrapperBase):
1721         """
1722         Detect metadata updates and synchronize Package attributes.
1723         """
1724
1725         __slots__ = ("_pkg",)
1726         _wrapped_keys = frozenset(
1727                 ["COUNTER", "INHERITED", "IUSE", "SLOT", "USE", "_mtime_"])
1728
1729         def __init__(self, pkg, metadata):
1730                 _PackageMetadataWrapperBase.__init__(self)
1731                 self._pkg = pkg
1732                 self.update(metadata)
1733
1734         def __setitem__(self, k, v):
1735                 _PackageMetadataWrapperBase.__setitem__(self, k, v)
1736                 if k in self._wrapped_keys:
1737                         getattr(self, "_set_" + k.lower())(k, v)
1738
1739         def _set_inherited(self, k, v):
1740                 if isinstance(v, basestring):
1741                         v = frozenset(v.split())
1742                 self._pkg.inherited = v
1743
1744         def _set_iuse(self, k, v):
1745                 self._pkg.iuse = self._pkg._iuse(
1746                         v.split(), self._pkg.root_config.iuse_implicit)
1747
1748         def _set_slot(self, k, v):
1749                 self._pkg.slot = v
1750
1751         def _set_use(self, k, v):
1752                 self._pkg.use = self._pkg._use(v.split())
1753
1754         def _set_counter(self, k, v):
1755                 if isinstance(v, basestring):
1756                         try:
1757                                 v = long(v.strip())
1758                         except ValueError:
1759                                 v = 0
1760                 self._pkg.counter = v
1761
1762         def _set__mtime_(self, k, v):
1763                 if isinstance(v, basestring):
1764                         try:
1765                                 v = long(v.strip())
1766                         except ValueError:
1767                                 v = 0
1768                 self._pkg.mtime = v
1769
1770 class EbuildFetchonly(SlotObject):
1771
1772         __slots__ = ("fetch_all", "pkg", "pretend", "settings")
1773
1774         def execute(self):
1775                 settings = self.settings
1776                 pkg = self.pkg
1777                 portdb = pkg.root_config.trees["porttree"].dbapi
1778                 ebuild_path = portdb.findname(pkg.cpv)
1779                 settings.setcpv(pkg)
1780                 debug = settings.get("PORTAGE_DEBUG") == "1"
1781                 restrict_fetch = 'fetch' in settings['PORTAGE_RESTRICT'].split()
1782
1783                 if restrict_fetch:
1784                         rval = self._execute_with_builddir()
1785                 else:
1786                         rval = portage.doebuild(ebuild_path, "fetch",
1787                                 settings["ROOT"], settings, debug=debug,
1788                                 listonly=self.pretend, fetchonly=1, fetchall=self.fetch_all,
1789                                 mydbapi=portdb, tree="porttree")
1790
1791                         if rval != os.EX_OK:
1792                                 msg = "Fetch failed for '%s'" % (pkg.cpv,)
1793                                 eerror(msg, phase="unpack", key=pkg.cpv)
1794
1795                 return rval
1796
1797         def _execute_with_builddir(self):
1798                 # To spawn pkg_nofetch requires PORTAGE_BUILDDIR for
1799                 # ensuring sane $PWD (bug #239560) and storing elog
1800                 # messages. Use a private temp directory, in order
1801                 # to avoid locking the main one.
1802                 settings = self.settings
1803                 global_tmpdir = settings["PORTAGE_TMPDIR"]
1804                 from tempfile import mkdtemp
1805                 try:
1806                         private_tmpdir = mkdtemp("", "._portage_fetch_.", global_tmpdir)
1807                 except OSError, e:
1808                         if e.errno != portage.exception.PermissionDenied.errno:
1809                                 raise
1810                         raise portage.exception.PermissionDenied(global_tmpdir)
1811                 settings["PORTAGE_TMPDIR"] = private_tmpdir
1812                 settings.backup_changes("PORTAGE_TMPDIR")
1813                 try:
1814                         retval = self._execute()
1815                 finally:
1816                         settings["PORTAGE_TMPDIR"] = global_tmpdir
1817                         settings.backup_changes("PORTAGE_TMPDIR")
1818                         shutil.rmtree(private_tmpdir)
1819                 return retval
1820
1821         def _execute(self):
1822                 settings = self.settings
1823                 pkg = self.pkg
1824                 root_config = pkg.root_config
1825                 portdb = root_config.trees["porttree"].dbapi
1826                 ebuild_path = portdb.findname(pkg.cpv)
1827                 debug = settings.get("PORTAGE_DEBUG") == "1"
1828                 retval = portage.doebuild(ebuild_path, "fetch",
1829                         self.settings["ROOT"], self.settings, debug=debug,
1830                         listonly=self.pretend, fetchonly=1, fetchall=self.fetch_all,
1831                         mydbapi=portdb, tree="porttree")
1832
1833                 if retval != os.EX_OK:
1834                         msg = "Fetch failed for '%s'" % (pkg.cpv,)
1835                         eerror(msg, phase="unpack", key=pkg.cpv)
1836
1837                 portage.elog.elog_process(self.pkg.cpv, self.settings)
1838                 return retval
1839
1840 class PollConstants(object):
1841
1842         """
1843         Provides POLL* constants that are equivalent to those from the
1844         select module, for use by PollSelectAdapter.
1845         """
1846
1847         names = ("POLLIN", "POLLPRI", "POLLOUT", "POLLERR", "POLLHUP", "POLLNVAL")
1848         v = 1
1849         for k in names:
1850                 locals()[k] = getattr(select, k, v)
1851                 v *= 2
1852         del k, v
1853
1854 class AsynchronousTask(SlotObject):
1855         """
1856         Subclasses override _wait() and _poll() so that calls
1857         to public methods can be wrapped for implementing
1858         hooks such as exit listener notification.
1859
1860         Sublasses should call self.wait() to notify exit listeners after
1861         the task is complete and self.returncode has been set.
1862         """
1863
1864         __slots__ = ("background", "cancelled", "returncode") + \
1865                 ("_exit_listeners", "_exit_listener_stack", "_start_listeners")
1866
1867         def start(self):
1868                 """
1869                 Start an asynchronous task and then return as soon as possible.
1870                 """
1871                 self._start_hook()
1872                 self._start()
1873
1874         def _start(self):
1875                 raise NotImplementedError(self)
1876
1877         def isAlive(self):
1878                 return self.returncode is None
1879
1880         def poll(self):
1881                 self._wait_hook()
1882                 return self._poll()
1883
1884         def _poll(self):
1885                 return self.returncode
1886
1887         def wait(self):
1888                 if self.returncode is None:
1889                         self._wait()
1890                 self._wait_hook()
1891                 return self.returncode
1892
1893         def _wait(self):
1894                 return self.returncode
1895
1896         def cancel(self):
1897                 self.cancelled = True
1898                 self.wait()
1899
1900         def addStartListener(self, f):
1901                 """
1902                 The function will be called with one argument, a reference to self.
1903                 """
1904                 if self._start_listeners is None:
1905                         self._start_listeners = []
1906                 self._start_listeners.append(f)
1907
1908         def removeStartListener(self, f):
1909                 if self._start_listeners is None:
1910                         return
1911                 self._start_listeners.remove(f)
1912
1913         def _start_hook(self):
1914                 if self._start_listeners is not None:
1915                         start_listeners = self._start_listeners
1916                         self._start_listeners = None
1917
1918                         for f in start_listeners:
1919                                 f(self)
1920
1921         def addExitListener(self, f):
1922                 """
1923                 The function will be called with one argument, a reference to self.
1924                 """
1925                 if self._exit_listeners is None:
1926                         self._exit_listeners = []
1927                 self._exit_listeners.append(f)
1928
1929         def removeExitListener(self, f):
1930                 if self._exit_listeners is None:
1931                         if self._exit_listener_stack is not None:
1932                                 self._exit_listener_stack.remove(f)
1933                         return
1934                 self._exit_listeners.remove(f)
1935
1936         def _wait_hook(self):
1937                 """
1938                 Call this method after the task completes, just before returning
1939                 the returncode from wait() or poll(). This hook is
1940                 used to trigger exit listeners when the returncode first
1941                 becomes available.
1942                 """
1943                 if self.returncode is not None and \
1944                         self._exit_listeners is not None:
1945
1946                         # This prevents recursion, in case one of the
1947                         # exit handlers triggers this method again by
1948                         # calling wait(). Use a stack that gives
1949                         # removeExitListener() an opportunity to consume
1950                         # listeners from the stack, before they can get
1951                         # called below. This is necessary because a call
1952                         # to one exit listener may result in a call to
1953                         # removeExitListener() for another listener on
1954                         # the stack. That listener needs to be removed
1955                         # from the stack since it would be inconsistent
1956                         # to call it after it has been been passed into
1957                         # removeExitListener().
1958                         self._exit_listener_stack = self._exit_listeners
1959                         self._exit_listeners = None
1960
1961                         self._exit_listener_stack.reverse()
1962                         while self._exit_listener_stack:
1963                                 self._exit_listener_stack.pop()(self)
1964
1965 class AbstractPollTask(AsynchronousTask):
1966
1967         __slots__ = ("scheduler",) + \
1968                 ("_registered",)
1969
1970         _bufsize = 4096
1971         _exceptional_events = PollConstants.POLLERR | PollConstants.POLLNVAL
1972         _registered_events = PollConstants.POLLIN | PollConstants.POLLHUP | \
1973                 _exceptional_events
1974
1975         def _unregister(self):
1976                 raise NotImplementedError(self)
1977
1978         def _unregister_if_appropriate(self, event):
1979                 if self._registered:
1980                         if event & self._exceptional_events:
1981                                 self._unregister()
1982                                 self.cancel()
1983                         elif event & PollConstants.POLLHUP:
1984                                 self._unregister()
1985                                 self.wait()
1986
1987 class PipeReader(AbstractPollTask):
1988
1989         """
1990         Reads output from one or more files and saves it in memory,
1991         for retrieval via the getvalue() method. This is driven by
1992         the scheduler's poll() loop, so it runs entirely within the
1993         current process.
1994         """
1995
1996         __slots__ = ("input_files",) + \
1997                 ("_read_data", "_reg_ids")
1998
1999         def _start(self):
2000                 self._reg_ids = set()
2001                 self._read_data = []
2002                 for k, f in self.input_files.iteritems():
2003                         fcntl.fcntl(f.fileno(), fcntl.F_SETFL,
2004                                 fcntl.fcntl(f.fileno(), fcntl.F_GETFL) | os.O_NONBLOCK)
2005                         self._reg_ids.add(self.scheduler.register(f.fileno(),
2006                                 self._registered_events, self._output_handler))
2007                 self._registered = True
2008
2009         def isAlive(self):
2010                 return self._registered
2011
2012         def cancel(self):
2013                 if self.returncode is None:
2014                         self.returncode = 1
2015                         self.cancelled = True
2016                 self.wait()
2017
2018         def _wait(self):
2019                 if self.returncode is not None:
2020                         return self.returncode
2021
2022                 if self._registered:
2023                         self.scheduler.schedule(self._reg_ids)
2024                         self._unregister()
2025
2026                 self.returncode = os.EX_OK
2027                 return self.returncode
2028
2029         def getvalue(self):
2030                 """Retrieve the entire contents"""
2031                 if sys.hexversion >= 0x3000000:
2032                         return bytes().join(self._read_data)
2033                 return "".join(self._read_data)
2034
2035         def close(self):
2036                 """Free the memory buffer."""
2037                 self._read_data = None
2038
2039         def _output_handler(self, fd, event):
2040
2041                 if event & PollConstants.POLLIN:
2042
2043                         for f in self.input_files.itervalues():
2044                                 if fd == f.fileno():
2045                                         break
2046
2047                         buf = array.array('B')
2048                         try:
2049                                 buf.fromfile(f, self._bufsize)
2050                         except EOFError:
2051                                 pass
2052
2053                         if buf:
2054                                 self._read_data.append(buf.tostring())
2055                         else:
2056                                 self._unregister()
2057                                 self.wait()
2058
2059                 self._unregister_if_appropriate(event)
2060                 return self._registered
2061
2062         def _unregister(self):
2063                 """
2064                 Unregister from the scheduler and close open files.
2065                 """
2066
2067                 self._registered = False
2068
2069                 if self._reg_ids is not None:
2070                         for reg_id in self._reg_ids:
2071                                 self.scheduler.unregister(reg_id)
2072                         self._reg_ids = None
2073
2074                 if self.input_files is not None:
2075                         for f in self.input_files.itervalues():
2076                                 f.close()
2077                         self.input_files = None
2078
2079 class CompositeTask(AsynchronousTask):
2080
2081         __slots__ = ("scheduler",) + ("_current_task",)
2082
2083         def isAlive(self):
2084                 return self._current_task is not None
2085
2086         def cancel(self):
2087                 self.cancelled = True
2088                 if self._current_task is not None:
2089                         self._current_task.cancel()
2090
2091         def _poll(self):
2092                 """
2093                 This does a loop calling self._current_task.poll()
2094                 repeatedly as long as the value of self._current_task
2095                 keeps changing. It calls poll() a maximum of one time
2096                 for a given self._current_task instance. This is useful
2097                 since calling poll() on a task can trigger advance to
2098                 the next task could eventually lead to the returncode
2099                 being set in cases when polling only a single task would
2100                 not have the same effect.
2101                 """
2102
2103                 prev = None
2104                 while True:
2105                         task = self._current_task
2106                         if task is None or task is prev:
2107                                 # don't poll the same task more than once
2108                                 break
2109                         task.poll()
2110                         prev = task
2111
2112                 return self.returncode
2113
2114         def _wait(self):
2115
2116                 prev = None
2117                 while True:
2118                         task = self._current_task
2119                         if task is None:
2120                                 # don't wait for the same task more than once
2121                                 break
2122                         if task is prev:
2123                                 # Before the task.wait() method returned, an exit
2124                                 # listener should have set self._current_task to either
2125                                 # a different task or None. Something is wrong.
2126                                 raise AssertionError("self._current_task has not " + \
2127                                         "changed since calling wait", self, task)
2128                         task.wait()
2129                         prev = task
2130
2131                 return self.returncode
2132
2133         def _assert_current(self, task):
2134                 """
2135                 Raises an AssertionError if the given task is not the
2136                 same one as self._current_task. This can be useful
2137                 for detecting bugs.
2138                 """
2139                 if task is not self._current_task:
2140                         raise AssertionError("Unrecognized task: %s" % (task,))
2141
2142         def _default_exit(self, task):
2143                 """
2144                 Calls _assert_current() on the given task and then sets the
2145                 composite returncode attribute if task.returncode != os.EX_OK.
2146                 If the task failed then self._current_task will be set to None.
2147                 Subclasses can use this as a generic task exit callback.
2148
2149                 @rtype: int
2150                 @returns: The task.returncode attribute.
2151                 """
2152                 self._assert_current(task)
2153                 if task.returncode != os.EX_OK:
2154                         self.returncode = task.returncode
2155                         self._current_task = None
2156                 return task.returncode
2157
2158         def _final_exit(self, task):
2159                 """
2160                 Assumes that task is the final task of this composite task.
2161                 Calls _default_exit() and sets self.returncode to the task's
2162                 returncode and sets self._current_task to None.
2163                 """
2164                 self._default_exit(task)
2165                 self._current_task = None
2166                 self.returncode = task.returncode
2167                 return self.returncode
2168
2169         def _default_final_exit(self, task):
2170                 """
2171                 This calls _final_exit() and then wait().
2172
2173                 Subclasses can use this as a generic final task exit callback.
2174
2175                 """
2176                 self._final_exit(task)
2177                 return self.wait()
2178
2179         def _start_task(self, task, exit_handler):
2180                 """
2181                 Register exit handler for the given task, set it
2182                 as self._current_task, and call task.start().
2183
2184                 Subclasses can use this as a generic way to start
2185                 a task.
2186
2187                 """
2188                 task.addExitListener(exit_handler)
2189                 self._current_task = task
2190                 task.start()
2191
2192 class TaskSequence(CompositeTask):
2193         """
2194         A collection of tasks that executes sequentially. Each task
2195         must have a addExitListener() method that can be used as
2196         a means to trigger movement from one task to the next.
2197         """
2198
2199         __slots__ = ("_task_queue",)
2200
2201         def __init__(self, **kwargs):
2202                 AsynchronousTask.__init__(self, **kwargs)
2203                 self._task_queue = deque()
2204
2205         def add(self, task):
2206                 self._task_queue.append(task)
2207
2208         def _start(self):
2209                 self._start_next_task()
2210
2211         def cancel(self):
2212                 self._task_queue.clear()
2213                 CompositeTask.cancel(self)
2214
2215         def _start_next_task(self):
2216                 self._start_task(self._task_queue.popleft(),
2217                         self._task_exit_handler)
2218
2219         def _task_exit_handler(self, task):
2220                 if self._default_exit(task) != os.EX_OK:
2221                         self.wait()
2222                 elif self._task_queue:
2223                         self._start_next_task()
2224                 else:
2225                         self._final_exit(task)
2226                         self.wait()
2227
2228 class SubProcess(AbstractPollTask):
2229
2230         __slots__ = ("pid",) + \
2231                 ("_files", "_reg_id")
2232
2233         # A file descriptor is required for the scheduler to monitor changes from
2234         # inside a poll() loop. When logging is not enabled, create a pipe just to
2235         # serve this purpose alone.
2236         _dummy_pipe_fd = 9
2237
2238         def _poll(self):
2239                 if self.returncode is not None:
2240                         return self.returncode
2241                 if self.pid is None:
2242                         return self.returncode
2243                 if self._registered:
2244                         return self.returncode
2245
2246                 try:
2247                         retval = os.waitpid(self.pid, os.WNOHANG)
2248                 except OSError, e:
2249                         if e.errno != errno.ECHILD:
2250                                 raise
2251                         del e
2252                         retval = (self.pid, 1)
2253
2254                 if retval == (0, 0):
2255                         return None
2256                 self._set_returncode(retval)
2257                 return self.returncode
2258
2259         def cancel(self):
2260                 if self.isAlive():
2261                         try:
2262                                 os.kill(self.pid, signal.SIGTERM)
2263                         except OSError, e:
2264                                 if e.errno != errno.ESRCH:
2265                                         raise
2266                                 del e
2267
2268                 self.cancelled = True
2269                 if self.pid is not None:
2270                         self.wait()
2271                 return self.returncode
2272
2273         def isAlive(self):
2274                 return self.pid is not None and \
2275                         self.returncode is None
2276
2277         def _wait(self):
2278
2279                 if self.returncode is not None:
2280                         return self.returncode
2281
2282                 if self._registered:
2283                         self.scheduler.schedule(self._reg_id)
2284                         self._unregister()
2285                         if self.returncode is not None:
2286                                 return self.returncode
2287
2288                 try:
2289                         wait_retval = os.waitpid(self.pid, 0)
2290                 except OSError, e:
2291                         if e.errno != errno.ECHILD:
2292                                 raise
2293                         del e
2294                         self._set_returncode((self.pid, 1))
2295                 else:
2296                         self._set_returncode(wait_retval)
2297
2298                 return self.returncode
2299
2300         def _unregister(self):
2301                 """
2302                 Unregister from the scheduler and close open files.
2303                 """
2304
2305                 self._registered = False
2306
2307                 if self._reg_id is not None:
2308                         self.scheduler.unregister(self._reg_id)
2309                         self._reg_id = None
2310
2311                 if self._files is not None:
2312                         for f in self._files.itervalues():
2313                                 f.close()
2314                         self._files = None
2315
2316         def _set_returncode(self, wait_retval):
2317
2318                 retval = wait_retval[1]
2319
2320                 if retval != os.EX_OK:
2321                         if retval & 0xff:
2322                                 retval = (retval & 0xff) << 8
2323                         else:
2324                                 retval = retval >> 8
2325
2326                 self.returncode = retval
2327
2328 class SpawnProcess(SubProcess):
2329
2330         """
2331         Constructor keyword args are passed into portage.process.spawn().
2332         The required "args" keyword argument will be passed as the first
2333         spawn() argument.
2334         """
2335
2336         _spawn_kwarg_names = ("env", "opt_name", "fd_pipes",
2337                 "uid", "gid", "groups", "umask", "logfile",
2338                 "path_lookup", "pre_exec")
2339
2340         __slots__ = ("args",) + \
2341                 _spawn_kwarg_names
2342
2343         _file_names = ("log", "process", "stdout")
2344         _files_dict = slot_dict_class(_file_names, prefix="")
2345
2346         def _start(self):
2347
2348                 if self.cancelled:
2349                         return
2350
2351                 if self.fd_pipes is None:
2352                         self.fd_pipes = {}
2353                 fd_pipes = self.fd_pipes
2354                 fd_pipes.setdefault(0, sys.stdin.fileno())
2355                 fd_pipes.setdefault(1, sys.stdout.fileno())
2356                 fd_pipes.setdefault(2, sys.stderr.fileno())
2357
2358                 # flush any pending output
2359                 for fd in fd_pipes.itervalues():
2360                         if fd == sys.stdout.fileno():
2361                                 sys.stdout.flush()
2362                         if fd == sys.stderr.fileno():
2363                                 sys.stderr.flush()
2364
2365                 logfile = self.logfile
2366                 self._files = self._files_dict()
2367                 files = self._files
2368
2369                 master_fd, slave_fd = self._pipe(fd_pipes)
2370                 fcntl.fcntl(master_fd, fcntl.F_SETFL,
2371                         fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
2372
2373                 null_input = None
2374                 fd_pipes_orig = fd_pipes.copy()
2375                 if self.background:
2376                         # TODO: Use job control functions like tcsetpgrp() to control
2377                         # access to stdin. Until then, use /dev/null so that any
2378                         # attempts to read from stdin will immediately return EOF
2379                         # instead of blocking indefinitely.
2380                         null_input = open('/dev/null', 'rb')
2381                         fd_pipes[0] = null_input.fileno()
2382                 else:
2383                         fd_pipes[0] = fd_pipes_orig[0]
2384
2385                 files.process = os.fdopen(master_fd, 'rb')
2386                 if logfile is not None:
2387
2388                         fd_pipes[1] = slave_fd
2389                         fd_pipes[2] = slave_fd
2390
2391                         files.log = open(logfile, mode='ab')
2392                         portage.util.apply_secpass_permissions(logfile,
2393                                 uid=portage.portage_uid, gid=portage.portage_gid,
2394                                 mode=0660)
2395
2396                         if not self.background:
2397                                 files.stdout = os.fdopen(os.dup(fd_pipes_orig[1]), 'wb')
2398
2399                         output_handler = self._output_handler
2400
2401                 else:
2402
2403                         # Create a dummy pipe so the scheduler can monitor
2404                         # the process from inside a poll() loop.
2405                         fd_pipes[self._dummy_pipe_fd] = slave_fd
2406                         if self.background:
2407                                 fd_pipes[1] = slave_fd
2408                                 fd_pipes[2] = slave_fd
2409                         output_handler = self._dummy_handler
2410
2411                 kwargs = {}
2412                 for k in self._spawn_kwarg_names:
2413                         v = getattr(self, k)
2414                         if v is not None:
2415                                 kwargs[k] = v
2416
2417                 kwargs["fd_pipes"] = fd_pipes
2418                 kwargs["returnpid"] = True
2419                 kwargs.pop("logfile", None)
2420
2421                 self._reg_id = self.scheduler.register(files.process.fileno(),
2422                         self._registered_events, output_handler)
2423                 self._registered = True
2424
2425                 retval = self._spawn(self.args, **kwargs)
2426
2427                 os.close(slave_fd)
2428                 if null_input is not None:
2429                         null_input.close()
2430
2431                 if isinstance(retval, int):
2432                         # spawn failed
2433                         self._unregister()
2434                         self.returncode = retval
2435                         self.wait()
2436                         return
2437
2438                 self.pid = retval[0]
2439                 portage.process.spawned_pids.remove(self.pid)
2440
2441         def _pipe(self, fd_pipes):
2442                 """
2443                 @type fd_pipes: dict
2444                 @param fd_pipes: pipes from which to copy terminal size if desired.
2445                 """
2446                 return os.pipe()
2447
2448         def _spawn(self, args, **kwargs):
2449                 return portage.process.spawn(args, **kwargs)
2450
2451         def _output_handler(self, fd, event):
2452
2453                 if event & PollConstants.POLLIN:
2454
2455                         files = self._files
2456                         buf = array.array('B')
2457                         try:
2458                                 buf.fromfile(files.process, self._bufsize)
2459                         except EOFError:
2460                                 pass
2461
2462                         if buf:
2463                                 if not self.background:
2464                                         write_successful = False
2465                                         failures = 0
2466                                         while True:
2467                                                 try:
2468                                                         if not write_successful:
2469                                                                 buf.tofile(files.stdout)
2470                                                                 write_successful = True
2471                                                         files.stdout.flush()
2472                                                         break
2473                                                 except IOError, e:
2474                                                         if e.errno != errno.EAGAIN:
2475                                                                 raise
2476                                                         del e
2477                                                         failures += 1
2478                                                         if failures > 50:
2479                                                                 # Avoid a potentially infinite loop. In
2480                                                                 # most cases, the failure count is zero
2481                                                                 # and it's unlikely to exceed 1.
2482                                                                 raise
2483
2484                                                         # This means that a subprocess has put an inherited
2485                                                         # stdio file descriptor (typically stdin) into
2486                                                         # O_NONBLOCK mode. This is not acceptable (see bug
2487                                                         # #264435), so revert it. We need to use a loop
2488                                                         # here since there's a race condition due to
2489                                                         # parallel processes being able to change the
2490                                                         # flags on the inherited file descriptor.
2491                                                         # TODO: When possible, avoid having child processes
2492                                                         # inherit stdio file descriptors from portage
2493                                                         # (maybe it can't be avoided with
2494                                                         # PROPERTIES=interactive).
2495                                                         fcntl.fcntl(files.stdout.fileno(), fcntl.F_SETFL,
2496                                                                 fcntl.fcntl(files.stdout.fileno(),
2497                                                                 fcntl.F_GETFL) ^ os.O_NONBLOCK)
2498
2499                                 buf.tofile(files.log)
2500                                 files.log.flush()
2501                         else:
2502                                 self._unregister()
2503                                 self.wait()
2504
2505                 self._unregister_if_appropriate(event)
2506                 return self._registered
2507
2508         def _dummy_handler(self, fd, event):
2509                 """
2510                 This method is mainly interested in detecting EOF, since
2511                 the only purpose of the pipe is to allow the scheduler to
2512                 monitor the process from inside a poll() loop.
2513                 """
2514
2515                 if event & PollConstants.POLLIN:
2516
2517                         buf = array.array('B')
2518                         try:
2519                                 buf.fromfile(self._files.process, self._bufsize)
2520                         except EOFError:
2521                                 pass
2522
2523                         if buf:
2524                                 pass
2525                         else:
2526                                 self._unregister()
2527                                 self.wait()
2528
2529                 self._unregister_if_appropriate(event)
2530                 return self._registered
2531
2532 class MiscFunctionsProcess(SpawnProcess):
2533         """
2534         Spawns misc-functions.sh with an existing ebuild environment.
2535         """
2536
2537         __slots__ = ("commands", "phase", "pkg", "settings")
2538
2539         def _start(self):
2540                 settings = self.settings
2541                 settings.pop("EBUILD_PHASE", None)
2542                 portage_bin_path = settings["PORTAGE_BIN_PATH"]
2543                 misc_sh_binary = os.path.join(portage_bin_path,
2544                         os.path.basename(portage.const.MISC_SH_BINARY))
2545
2546                 self.args = [portage._shell_quote(misc_sh_binary)] + self.commands
2547                 self.logfile = settings.get("PORTAGE_LOG_FILE")
2548
2549                 portage._doebuild_exit_status_unlink(
2550                         settings.get("EBUILD_EXIT_STATUS_FILE"))
2551
2552                 SpawnProcess._start(self)
2553
2554         def _spawn(self, args, **kwargs):
2555                 settings = self.settings
2556                 debug = settings.get("PORTAGE_DEBUG") == "1"
2557                 return portage.spawn(" ".join(args), settings,
2558                         debug=debug, **kwargs)
2559
2560         def _set_returncode(self, wait_retval):
2561                 SpawnProcess._set_returncode(self, wait_retval)
2562                 self.returncode = portage._doebuild_exit_status_check_and_log(
2563                         self.settings, self.phase, self.returncode)
2564
2565 class EbuildFetcher(SpawnProcess):
2566
2567         __slots__ = ("config_pool", "fetchonly", "fetchall", "pkg", "prefetch") + \
2568                 ("_build_dir",)
2569
2570         def _start(self):
2571
2572                 root_config = self.pkg.root_config
2573                 portdb = root_config.trees["porttree"].dbapi
2574                 ebuild_path = portdb.findname(self.pkg.cpv)
2575                 settings = self.config_pool.allocate()
2576                 settings.setcpv(self.pkg)
2577
2578                 # In prefetch mode, logging goes to emerge-fetch.log and the builddir
2579                 # should not be touched since otherwise it could interfere with
2580                 # another instance of the same cpv concurrently being built for a
2581                 # different $ROOT (currently, builds only cooperate with prefetchers
2582                 # that are spawned for the same $ROOT).
2583                 if not self.prefetch:
2584                         self._build_dir = EbuildBuildDir(pkg=self.pkg, settings=settings)
2585                         self._build_dir.lock()
2586                         self._build_dir.clean_log()
2587                         portage.prepare_build_dirs(self.pkg.root, self._build_dir.settings, 0)
2588                         if self.logfile is None:
2589                                 self.logfile = settings.get("PORTAGE_LOG_FILE")
2590
2591                 phase = "fetch"
2592                 if self.fetchall:
2593                         phase = "fetchall"
2594
2595                 # If any incremental variables have been overridden
2596                 # via the environment, those values need to be passed
2597                 # along here so that they are correctly considered by
2598                 # the config instance in the subproccess.
2599                 fetch_env = os.environ.copy()
2600
2601                 nocolor = settings.get("NOCOLOR")
2602                 if nocolor is not None:
2603                         fetch_env["NOCOLOR"] = nocolor
2604
2605                 fetch_env["PORTAGE_NICENESS"] = "0"
2606                 if self.prefetch:
2607                         fetch_env["PORTAGE_PARALLEL_FETCHONLY"] = "1"
2608
2609                 ebuild_binary = os.path.join(
2610                         settings["PORTAGE_BIN_PATH"], "ebuild")
2611
2612                 fetch_args = [ebuild_binary, ebuild_path, phase]
2613                 debug = settings.get("PORTAGE_DEBUG") == "1"
2614                 if debug:
2615                         fetch_args.append("--debug")
2616
2617                 self.args = fetch_args
2618                 self.env = fetch_env
2619                 SpawnProcess._start(self)
2620
2621         def _pipe(self, fd_pipes):
2622                 """When appropriate, use a pty so that fetcher progress bars,
2623                 like wget has, will work properly."""
2624                 if self.background or not sys.stdout.isatty():
2625                         # When the output only goes to a log file,
2626                         # there's no point in creating a pty.
2627                         return os.pipe()
2628                 stdout_pipe = fd_pipes.get(1)
2629                 got_pty, master_fd, slave_fd = \
2630                         portage._create_pty_or_pipe(copy_term_size=stdout_pipe)
2631                 return (master_fd, slave_fd)
2632
2633         def _set_returncode(self, wait_retval):
2634                 SpawnProcess._set_returncode(self, wait_retval)
2635                 # Collect elog messages that might have been
2636                 # created by the pkg_nofetch phase.
2637                 if self._build_dir is not None:
2638                         # Skip elog messages for prefetch, in order to avoid duplicates.
2639                         if not self.prefetch and self.returncode != os.EX_OK:
2640                                 elog_out = None
2641                                 if self.logfile is not None:
2642                                         if self.background:
2643                                                 elog_out = open(self.logfile, 'a')
2644                                 msg = "Fetch failed for '%s'" % (self.pkg.cpv,)
2645                                 if self.logfile is not None:
2646                                         msg += ", Log file:"
2647                                 eerror(msg, phase="unpack", key=self.pkg.cpv, out=elog_out)
2648                                 if self.logfile is not None:
2649                                         eerror(" '%s'" % (self.logfile,),
2650                                                 phase="unpack", key=self.pkg.cpv, out=elog_out)
2651                                 if elog_out is not None:
2652                                         elog_out.close()
2653                         if not self.prefetch:
2654                                 portage.elog.elog_process(self.pkg.cpv, self._build_dir.settings)
2655                         features = self._build_dir.settings.features
2656                         if self.returncode == os.EX_OK:
2657                                 self._build_dir.clean_log()
2658                         self._build_dir.unlock()
2659                         self.config_pool.deallocate(self._build_dir.settings)
2660                         self._build_dir = None
2661
2662 class EbuildBuildDir(SlotObject):
2663
2664         __slots__ = ("dir_path", "pkg", "settings",
2665                 "locked", "_catdir", "_lock_obj")
2666
2667         def __init__(self, **kwargs):
2668                 SlotObject.__init__(self, **kwargs)
2669                 self.locked = False
2670
2671         def lock(self):
2672                 """
2673                 This raises an AlreadyLocked exception if lock() is called
2674                 while a lock is already held. In order to avoid this, call
2675                 unlock() or check whether the "locked" attribute is True
2676                 or False before calling lock().
2677                 """
2678                 if self._lock_obj is not None:
2679                         raise self.AlreadyLocked((self._lock_obj,))
2680
2681                 dir_path = self.dir_path
2682                 if dir_path is None:
2683                         root_config = self.pkg.root_config
2684                         portdb = root_config.trees["porttree"].dbapi
2685                         ebuild_path = portdb.findname(self.pkg.cpv)
2686                         settings = self.settings
2687                         settings.setcpv(self.pkg)
2688                         debug = settings.get("PORTAGE_DEBUG") == "1"
2689                         use_cache = 1 # always true
2690                         portage.doebuild_environment(ebuild_path, "setup", root_config.root,
2691                                 self.settings, debug, use_cache, portdb)
2692                         dir_path = self.settings["PORTAGE_BUILDDIR"]
2693
2694                 catdir = os.path.dirname(dir_path)
2695                 self._catdir = catdir
2696
2697                 portage.util.ensure_dirs(os.path.dirname(catdir),
2698                         gid=portage.portage_gid,
2699                         mode=070, mask=0)
2700                 catdir_lock = None
2701                 try:
2702                         catdir_lock = portage.locks.lockdir(catdir)
2703                         portage.util.ensure_dirs(catdir,
2704                                 gid=portage.portage_gid,
2705                                 mode=070, mask=0)
2706                         self._lock_obj = portage.locks.lockdir(dir_path)
2707                 finally:
2708                         self.locked = self._lock_obj is not None
2709                         if catdir_lock is not None:
2710                                 portage.locks.unlockdir(catdir_lock)
2711
2712         def clean_log(self):
2713                 """Discard existing log."""
2714                 settings = self.settings
2715
2716                 for x in ('.logid', 'temp/build.log'):
2717                         try:
2718                                 os.unlink(os.path.join(settings["PORTAGE_BUILDDIR"], x))
2719                         except OSError:
2720                                 pass
2721
2722         def unlock(self):
2723                 if self._lock_obj is None:
2724                         return
2725
2726                 portage.locks.unlockdir(self._lock_obj)
2727                 self._lock_obj = None
2728                 self.locked = False
2729
2730                 catdir = self._catdir
2731                 catdir_lock = None
2732                 try:
2733                         catdir_lock = portage.locks.lockdir(catdir)
2734                 finally:
2735                         if catdir_lock:
2736                                 try:
2737                                         os.rmdir(catdir)
2738                                 except OSError, e:
2739                                         if e.errno not in (errno.ENOENT,
2740                                                 errno.ENOTEMPTY, errno.EEXIST):
2741                                                 raise
2742                                         del e
2743                                 portage.locks.unlockdir(catdir_lock)
2744
2745         class AlreadyLocked(portage.exception.PortageException):
2746                 pass
2747
2748 class EbuildBuild(CompositeTask):
2749
2750         __slots__ = ("args_set", "config_pool", "find_blockers",
2751                 "ldpath_mtimes", "logger", "opts", "pkg", "pkg_count",
2752                 "prefetcher", "settings", "world_atom") + \
2753                 ("_build_dir", "_buildpkg", "_ebuild_path", "_issyspkg", "_tree")
2754
2755         def _start(self):
2756
2757                 logger = self.logger
2758                 opts = self.opts
2759                 pkg = self.pkg
2760                 settings = self.settings
2761                 world_atom = self.world_atom
2762                 root_config = pkg.root_config
2763                 tree = "porttree"
2764                 self._tree = tree
2765                 portdb = root_config.trees[tree].dbapi
2766                 settings.setcpv(pkg)
2767                 settings.configdict["pkg"]["EMERGE_FROM"] = pkg.type_name
2768                 ebuild_path = portdb.findname(self.pkg.cpv)
2769                 self._ebuild_path = ebuild_path
2770
2771                 prefetcher = self.prefetcher
2772                 if prefetcher is None:
2773                         pass
2774                 elif not prefetcher.isAlive():
2775                         prefetcher.cancel()
2776                 elif prefetcher.poll() is None:
2777
2778                         waiting_msg = "Fetching files " + \
2779                                 "in the background. " + \
2780                                 "To view fetch progress, run `tail -f " + \
2781                                 "/var/log/emerge-fetch.log` in another " + \
2782                                 "terminal."
2783                         msg_prefix = colorize("GOOD", " * ")
2784                         from textwrap import wrap
2785                         waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
2786                                 for line in wrap(waiting_msg, 65))
2787                         if not self.background:
2788                                 writemsg(waiting_msg, noiselevel=-1)
2789
2790                         self._current_task = prefetcher
2791                         prefetcher.addExitListener(self._prefetch_exit)
2792                         return
2793
2794                 self._prefetch_exit(prefetcher)
2795
2796         def _prefetch_exit(self, prefetcher):
2797
2798                 opts = self.opts
2799                 pkg = self.pkg
2800                 settings = self.settings
2801
2802                 if opts.fetchonly:
2803                                 fetcher = EbuildFetchonly(
2804                                         fetch_all=opts.fetch_all_uri,
2805                                         pkg=pkg, pretend=opts.pretend,
2806                                         settings=settings)
2807                                 retval = fetcher.execute()
2808                                 self.returncode = retval
2809                                 self.wait()
2810                                 return
2811
2812                 fetcher = EbuildFetcher(config_pool=self.config_pool,
2813                         fetchall=opts.fetch_all_uri,
2814                         fetchonly=opts.fetchonly,
2815                         background=self.background,
2816                         pkg=pkg, scheduler=self.scheduler)
2817
2818                 self._start_task(fetcher, self._fetch_exit)
2819
2820         def _fetch_exit(self, fetcher):
2821                 opts = self.opts
2822                 pkg = self.pkg
2823
2824                 fetch_failed = False
2825                 if opts.fetchonly:
2826                         fetch_failed = self._final_exit(fetcher) != os.EX_OK
2827                 else:
2828                         fetch_failed = self._default_exit(fetcher) != os.EX_OK
2829
2830                 if fetch_failed and fetcher.logfile is not None and \
2831                         os.path.exists(fetcher.logfile):
2832                         self.settings["PORTAGE_LOG_FILE"] = fetcher.logfile
2833
2834                 if not fetch_failed and fetcher.logfile is not None:
2835                         # Fetch was successful, so remove the fetch log.
2836                         try:
2837                                 os.unlink(fetcher.logfile)
2838                         except OSError:
2839                                 pass
2840
2841                 if fetch_failed or opts.fetchonly:
2842                         self.wait()
2843                         return
2844
2845                 logger = self.logger
2846                 opts = self.opts
2847                 pkg_count = self.pkg_count
2848                 scheduler = self.scheduler
2849                 settings = self.settings
2850                 features = settings.features
2851                 ebuild_path = self._ebuild_path
2852                 system_set = pkg.root_config.sets["system"]
2853
2854                 self._build_dir = EbuildBuildDir(pkg=pkg, settings=settings)
2855                 self._build_dir.lock()
2856
2857                 # Cleaning is triggered before the setup
2858                 # phase, in portage.doebuild().
2859                 msg = " === (%s of %s) Cleaning (%s::%s)" % \
2860                         (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2861                 short_msg = "emerge: (%s of %s) %s Clean" % \
2862                         (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2863                 logger.log(msg, short_msg=short_msg)
2864
2865                 #buildsyspkg: Check if we need to _force_ binary package creation
2866                 self._issyspkg = "buildsyspkg" in features and \
2867                                 system_set.findAtomForPackage(pkg) and \
2868                                 not opts.buildpkg
2869
2870                 if opts.buildpkg or self._issyspkg:
2871
2872                         self._buildpkg = True
2873
2874                         msg = " === (%s of %s) Compiling/Packaging (%s::%s)" % \
2875                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2876                         short_msg = "emerge: (%s of %s) %s Compile" % \
2877                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2878                         logger.log(msg, short_msg=short_msg)
2879
2880                 else:
2881                         msg = " === (%s of %s) Compiling/Merging (%s::%s)" % \
2882                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2883                         short_msg = "emerge: (%s of %s) %s Compile" % \
2884                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2885                         logger.log(msg, short_msg=short_msg)
2886
2887                 build = EbuildExecuter(background=self.background, pkg=pkg,
2888                         scheduler=scheduler, settings=settings)
2889                 self._start_task(build, self._build_exit)
2890
2891         def _unlock_builddir(self):
2892                 portage.elog.elog_process(self.pkg.cpv, self.settings)
2893                 self._build_dir.unlock()
2894
2895         def _build_exit(self, build):
2896                 if self._default_exit(build) != os.EX_OK:
2897                         self._unlock_builddir()
2898                         self.wait()
2899                         return
2900
2901                 opts = self.opts
2902                 buildpkg = self._buildpkg
2903
2904                 if not buildpkg:
2905                         self._final_exit(build)
2906                         self.wait()
2907                         return
2908
2909                 if self._issyspkg:
2910                         msg = ">>> This is a system package, " + \
2911                                 "let's pack a rescue tarball.\n"
2912
2913                         log_path = self.settings.get("PORTAGE_LOG_FILE")
2914                         if log_path is not None:
2915                                 log_file = open(log_path, 'a')
2916                                 try:
2917                                         log_file.write(msg)
2918                                 finally:
2919                                         log_file.close()
2920
2921                         if not self.background:
2922                                 portage.writemsg_stdout(msg, noiselevel=-1)
2923
2924                 packager = EbuildBinpkg(background=self.background, pkg=self.pkg,
2925                         scheduler=self.scheduler, settings=self.settings)
2926
2927                 self._start_task(packager, self._buildpkg_exit)
2928
2929         def _buildpkg_exit(self, packager):
2930                 """
2931                 Released build dir lock when there is a failure or
2932                 when in buildpkgonly mode. Otherwise, the lock will
2933                 be released when merge() is called.
2934                 """
2935
2936                 if self._default_exit(packager) != os.EX_OK:
2937                         self._unlock_builddir()
2938                         self.wait()
2939                         return
2940
2941                 if self.opts.buildpkgonly:
2942                         # Need to call "clean" phase for buildpkgonly mode
2943                         portage.elog.elog_process(self.pkg.cpv, self.settings)
2944                         phase = "clean"
2945                         clean_phase = EbuildPhase(background=self.background,
2946                                 pkg=self.pkg, phase=phase,
2947                                 scheduler=self.scheduler, settings=self.settings,
2948                                 tree=self._tree)
2949                         self._start_task(clean_phase, self._clean_exit)
2950                         return
2951
2952                 # Continue holding the builddir lock until
2953                 # after the package has been installed.
2954                 self._current_task = None
2955                 self.returncode = packager.returncode
2956                 self.wait()
2957
2958         def _clean_exit(self, clean_phase):
2959                 if self._final_exit(clean_phase) != os.EX_OK or \
2960                         self.opts.buildpkgonly:
2961                         self._unlock_builddir()
2962                 self.wait()
2963
2964         def install(self):
2965                 """
2966                 Install the package and then clean up and release locks.
2967                 Only call this after the build has completed successfully
2968                 and neither fetchonly nor buildpkgonly mode are enabled.
2969                 """
2970
2971                 find_blockers = self.find_blockers
2972                 ldpath_mtimes = self.ldpath_mtimes
2973                 logger = self.logger
2974                 pkg = self.pkg
2975                 pkg_count = self.pkg_count
2976                 settings = self.settings
2977                 world_atom = self.world_atom
2978                 ebuild_path = self._ebuild_path
2979                 tree = self._tree
2980
2981                 merge = EbuildMerge(find_blockers=self.find_blockers,
2982                         ldpath_mtimes=ldpath_mtimes, logger=logger, pkg=pkg,
2983                         pkg_count=pkg_count, pkg_path=ebuild_path,
2984                         scheduler=self.scheduler,
2985                         settings=settings, tree=tree, world_atom=world_atom)
2986
2987                 msg = " === (%s of %s) Merging (%s::%s)" % \
2988                         (pkg_count.curval, pkg_count.maxval,
2989                         pkg.cpv, ebuild_path)
2990                 short_msg = "emerge: (%s of %s) %s Merge" % \
2991                         (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2992                 logger.log(msg, short_msg=short_msg)
2993
2994                 try:
2995                         rval = merge.execute()
2996                 finally:
2997                         self._unlock_builddir()
2998
2999                 return rval
3000
3001 class EbuildExecuter(CompositeTask):
3002
3003         __slots__ = ("pkg", "scheduler", "settings") + ("_tree",)
3004
3005         _phases = ("prepare", "configure", "compile", "test", "install")
3006
3007         _live_eclasses = frozenset([
3008                 "bzr",
3009                 "cvs",
3010                 "darcs",
3011                 "git",
3012                 "mercurial",
3013                 "subversion"
3014         ])
3015
3016         def _start(self):
3017                 self._tree = "porttree"
3018                 pkg = self.pkg
3019                 phase = "clean"
3020                 clean_phase = EbuildPhase(background=self.background, pkg=pkg, phase=phase,
3021                         scheduler=self.scheduler, settings=self.settings, tree=self._tree)
3022                 self._start_task(clean_phase, self._clean_phase_exit)
3023
3024         def _clean_phase_exit(self, clean_phase):
3025
3026                 if self._default_exit(clean_phase) != os.EX_OK:
3027                         self.wait()
3028                         return
3029
3030                 pkg = self.pkg
3031                 scheduler = self.scheduler
3032                 settings = self.settings
3033                 cleanup = 1
3034
3035                 # This initializes PORTAGE_LOG_FILE.
3036                 portage.prepare_build_dirs(pkg.root, settings, cleanup)
3037
3038                 setup_phase = EbuildPhase(background=self.background,
3039                         pkg=pkg, phase="setup", scheduler=scheduler,
3040                         settings=settings, tree=self._tree)
3041
3042                 setup_phase.addExitListener(self._setup_exit)
3043                 self._current_task = setup_phase
3044                 self.scheduler.scheduleSetup(setup_phase)
3045
3046         def _setup_exit(self, setup_phase):
3047
3048                 if self._default_exit(setup_phase) != os.EX_OK:
3049                         self.wait()
3050                         return
3051
3052                 unpack_phase = EbuildPhase(background=self.background,
3053                         pkg=self.pkg, phase="unpack", scheduler=self.scheduler,
3054                         settings=self.settings, tree=self._tree)
3055
3056                 if self._live_eclasses.intersection(self.pkg.inherited):
3057                         # Serialize $DISTDIR access for live ebuilds since
3058                         # otherwise they can interfere with eachother.
3059
3060                         unpack_phase.addExitListener(self._unpack_exit)
3061                         self._current_task = unpack_phase
3062                         self.scheduler.scheduleUnpack(unpack_phase)
3063
3064                 else:
3065                         self._start_task(unpack_phase, self._unpack_exit)
3066
3067         def _unpack_exit(self, unpack_phase):
3068
3069                 if self._default_exit(unpack_phase) != os.EX_OK:
3070                         self.wait()
3071                         return
3072
3073                 ebuild_phases = TaskSequence(scheduler=self.scheduler)
3074
3075                 pkg = self.pkg
3076                 phases = self._phases
3077                 eapi = pkg.metadata["EAPI"]
3078                 if eapi in ("0", "1"):
3079                         # skip src_prepare and src_configure
3080                         phases = phases[2:]
3081
3082                 for phase in phases:
3083                         ebuild_phases.add(EbuildPhase(background=self.background,
3084                                 pkg=self.pkg, phase=phase, scheduler=self.scheduler,
3085                                 settings=self.settings, tree=self._tree))
3086
3087                 self._start_task(ebuild_phases, self._default_final_exit)
3088
3089 class EbuildMetadataPhase(SubProcess):
3090
3091         """
3092         Asynchronous interface for the ebuild "depend" phase which is
3093         used to extract metadata from the ebuild.
3094         """
3095
3096         __slots__ = ("cpv", "ebuild_path", "fd_pipes", "metadata_callback",
3097                 "ebuild_mtime", "metadata", "portdb", "repo_path", "settings") + \
3098                 ("_raw_metadata",)
3099
3100         _file_names = ("ebuild",)
3101         _files_dict = slot_dict_class(_file_names, prefix="")
3102         _metadata_fd = 9
3103
3104         def _start(self):
3105                 settings = self.settings
3106                 settings.setcpv(self.cpv)
3107                 ebuild_path = self.ebuild_path
3108
3109                 eapi = None
3110                 if 'parse-eapi-glep-55' in settings.features:
3111                         pf, eapi = portage._split_ebuild_name_glep55(
3112                                 os.path.basename(ebuild_path))
3113                 if eapi is None and \
3114                         'parse-eapi-ebuild-head' in settings.features:
3115                         eapi = portage._parse_eapi_ebuild_head(codecs.open(ebuild_path,
3116                                 mode='r', encoding='utf_8', errors='replace'))
3117
3118                 if eapi is not None:
3119                         if not portage.eapi_is_supported(eapi):
3120                                 self.metadata_callback(self.cpv, self.ebuild_path,
3121                                         self.repo_path, {'EAPI' : eapi}, self.ebuild_mtime)
3122                                 self.returncode = os.EX_OK
3123                                 self.wait()
3124                                 return
3125
3126                         settings.configdict['pkg']['EAPI'] = eapi
3127
3128                 debug = settings.get("PORTAGE_DEBUG") == "1"
3129                 master_fd = None
3130                 slave_fd = None
3131                 fd_pipes = None
3132                 if self.fd_pipes is not None:
3133                         fd_pipes = self.fd_pipes.copy()
3134                 else:
3135                         fd_pipes = {}
3136
3137                 fd_pipes.setdefault(0, sys.stdin.fileno())
3138                 fd_pipes.setdefault(1, sys.stdout.fileno())
3139                 fd_pipes.setdefault(2, sys.stderr.fileno())
3140
3141                 # flush any pending output
3142                 for fd in fd_pipes.itervalues():
3143                         if fd == sys.stdout.fileno():
3144                                 sys.stdout.flush()
3145                         if fd == sys.stderr.fileno():
3146                                 sys.stderr.flush()
3147
3148                 fd_pipes_orig = fd_pipes.copy()
3149                 self._files = self._files_dict()
3150                 files = self._files
3151
3152                 master_fd, slave_fd = os.pipe()
3153                 fcntl.fcntl(master_fd, fcntl.F_SETFL,
3154                         fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
3155
3156                 fd_pipes[self._metadata_fd] = slave_fd
3157
3158                 self._raw_metadata = []
3159                 files.ebuild = os.fdopen(master_fd, 'r')
3160                 self._reg_id = self.scheduler.register(files.ebuild.fileno(),
3161                         self._registered_events, self._output_handler)
3162                 self._registered = True
3163
3164                 retval = portage.doebuild(ebuild_path, "depend",
3165                         settings["ROOT"], settings, debug,
3166                         mydbapi=self.portdb, tree="porttree",
3167                         fd_pipes=fd_pipes, returnpid=True)
3168
3169                 os.close(slave_fd)
3170
3171                 if isinstance(retval, int):
3172                         # doebuild failed before spawning
3173                         self._unregister()
3174                         self.returncode = retval
3175                         self.wait()
3176                         return
3177
3178                 self.pid = retval[0]
3179                 portage.process.spawned_pids.remove(self.pid)
3180
3181         def _output_handler(self, fd, event):
3182
3183                 if event & PollConstants.POLLIN:
3184                         self._raw_metadata.append(self._files.ebuild.read())
3185                         if not self._raw_metadata[-1]:
3186                                 self._unregister()
3187                                 self.wait()
3188
3189                 self._unregister_if_appropriate(event)
3190                 return self._registered
3191
3192         def _set_returncode(self, wait_retval):
3193                 SubProcess._set_returncode(self, wait_retval)
3194                 if self.returncode == os.EX_OK:
3195                         metadata_lines = "".join(self._raw_metadata).splitlines()
3196                         if len(portage.auxdbkeys) != len(metadata_lines):
3197                                 # Don't trust bash's returncode if the
3198                                 # number of lines is incorrect.
3199                                 self.returncode = 1
3200                         else:
3201                                 metadata = izip(portage.auxdbkeys, metadata_lines)
3202                                 self.metadata = self.metadata_callback(self.cpv,
3203                                         self.ebuild_path, self.repo_path, metadata,
3204                                         self.ebuild_mtime)
3205
3206 class EbuildProcess(SpawnProcess):
3207
3208         __slots__ = ("phase", "pkg", "settings", "tree")
3209
3210         def _start(self):
3211                 # Don't open the log file during the clean phase since the
3212                 # open file can result in an nfs lock on $T/build.log which
3213                 # prevents the clean phase from removing $T.
3214                 if self.phase not in ("clean", "cleanrm"):
3215                         self.logfile = self.settings.get("PORTAGE_LOG_FILE")
3216                 SpawnProcess._start(self)
3217
3218         def _pipe(self, fd_pipes):
3219                 stdout_pipe = fd_pipes.get(1)
3220                 got_pty, master_fd, slave_fd = \
3221                         portage._create_pty_or_pipe(copy_term_size=stdout_pipe)
3222                 return (master_fd, slave_fd)
3223
3224         def _spawn(self, args, **kwargs):
3225
3226                 root_config = self.pkg.root_config
3227                 tree = self.tree
3228                 mydbapi = root_config.trees[tree].dbapi
3229                 settings = self.settings
3230                 ebuild_path = settings["EBUILD"]
3231                 debug = settings.get("PORTAGE_DEBUG") == "1"
3232
3233                 rval = portage.doebuild(ebuild_path, self.phase,
3234                         root_config.root, settings, debug,
3235                         mydbapi=mydbapi, tree=tree, **kwargs)
3236
3237                 return rval
3238
3239         def _set_returncode(self, wait_retval):
3240                 SpawnProcess._set_returncode(self, wait_retval)
3241
3242                 if self.phase not in ("clean", "cleanrm"):
3243                         self.returncode = portage._doebuild_exit_status_check_and_log(
3244                                 self.settings, self.phase, self.returncode)
3245
3246                 if self.phase == "test" and self.returncode != os.EX_OK and \
3247                         "test-fail-continue" in self.settings.features:
3248                         self.returncode = os.EX_OK
3249
3250                 portage._post_phase_userpriv_perms(self.settings)
3251
3252 class EbuildPhase(CompositeTask):
3253
3254         __slots__ = ("background", "pkg", "phase",
3255                 "scheduler", "settings", "tree")
3256
3257         _post_phase_cmds = portage._post_phase_cmds
3258
3259         def _start(self):
3260
3261                 ebuild_process = EbuildProcess(background=self.background,
3262                         pkg=self.pkg, phase=self.phase, scheduler=self.scheduler,
3263                         settings=self.settings, tree=self.tree)
3264
3265                 self._start_task(ebuild_process, self._ebuild_exit)
3266
3267         def _ebuild_exit(self, ebuild_process):
3268
3269                 if self.phase == "install":
3270                         out = None
3271                         log_path = self.settings.get("PORTAGE_LOG_FILE")
3272                         log_file = None
3273                         if self.background and log_path is not None:
3274                                 log_file = open(log_path, 'a')
3275                                 out = log_file
3276                         try:
3277                                 portage._check_build_log(self.settings, out=out)
3278                         finally:
3279                                 if log_file is not None:
3280                                         log_file.close()
3281
3282                 if self._default_exit(ebuild_process) != os.EX_OK:
3283                         self.wait()
3284                         return
3285
3286                 settings = self.settings
3287
3288                 if self.phase == "install":
3289                         portage._post_src_install_chost_fix(settings)
3290                         portage._post_src_install_uid_fix(settings)
3291
3292                 post_phase_cmds = self._post_phase_cmds.get(self.phase)
3293                 if post_phase_cmds is not None:
3294                         post_phase = MiscFunctionsProcess(background=self.background,
3295                                 commands=post_phase_cmds, phase=self.phase, pkg=self.pkg,
3296                                 scheduler=self.scheduler, settings=settings)
3297                         self._start_task(post_phase, self._post_phase_exit)
3298                         return
3299
3300                 self.returncode = ebuild_process.returncode
3301                 self._current_task = None
3302                 self.wait()
3303
3304         def _post_phase_exit(self, post_phase):
3305                 if self._final_exit(post_phase) != os.EX_OK:
3306                         writemsg("!!! post %s failed; exiting.\n" % self.phase,
3307                                 noiselevel=-1)
3308                 self._current_task = None
3309                 self.wait()
3310                 return
3311
3312 class EbuildBinpkg(EbuildProcess):
3313         """
3314         This assumes that src_install() has successfully completed.
3315         """
3316         __slots__ = ("_binpkg_tmpfile",)
3317
3318         def _start(self):
3319                 self.phase = "package"
3320                 self.tree = "porttree"
3321                 pkg = self.pkg
3322                 root_config = pkg.root_config
3323                 portdb = root_config.trees["porttree"].dbapi
3324                 bintree = root_config.trees["bintree"]
3325                 ebuild_path = portdb.findname(self.pkg.cpv)
3326                 settings = self.settings
3327                 debug = settings.get("PORTAGE_DEBUG") == "1"
3328
3329                 bintree.prevent_collision(pkg.cpv)
3330                 binpkg_tmpfile = os.path.join(bintree.pkgdir,
3331                         pkg.cpv + ".tbz2." + str(os.getpid()))
3332                 self._binpkg_tmpfile = binpkg_tmpfile
3333                 settings["PORTAGE_BINPKG_TMPFILE"] = binpkg_tmpfile
3334                 settings.backup_changes("PORTAGE_BINPKG_TMPFILE")
3335
3336                 try:
3337                         EbuildProcess._start(self)
3338                 finally:
3339                         settings.pop("PORTAGE_BINPKG_TMPFILE", None)
3340
3341         def _set_returncode(self, wait_retval):
3342                 EbuildProcess._set_returncode(self, wait_retval)
3343
3344                 pkg = self.pkg
3345                 bintree = pkg.root_config.trees["bintree"]
3346                 binpkg_tmpfile = self._binpkg_tmpfile
3347                 if self.returncode == os.EX_OK:
3348                         bintree.inject(pkg.cpv, filename=binpkg_tmpfile)
3349
3350 class EbuildMerge(SlotObject):
3351
3352         __slots__ = ("find_blockers", "logger", "ldpath_mtimes",
3353                 "pkg", "pkg_count", "pkg_path", "pretend",
3354                 "scheduler", "settings", "tree", "world_atom")
3355
3356         def execute(self):
3357                 root_config = self.pkg.root_config
3358                 settings = self.settings
3359                 retval = portage.merge(settings["CATEGORY"],
3360                         settings["PF"], settings["D"],
3361                         os.path.join(settings["PORTAGE_BUILDDIR"],
3362                         "build-info"), root_config.root, settings,
3363                         myebuild=settings["EBUILD"],
3364                         mytree=self.tree, mydbapi=root_config.trees[self.tree].dbapi,
3365                         vartree=root_config.trees["vartree"],
3366                         prev_mtimes=self.ldpath_mtimes,
3367                         scheduler=self.scheduler,
3368                         blockers=self.find_blockers)
3369
3370                 if retval == os.EX_OK:
3371                         self.world_atom(self.pkg)
3372                         self._log_success()
3373
3374                 return retval
3375
3376         def _log_success(self):
3377                 pkg = self.pkg
3378                 pkg_count = self.pkg_count
3379                 pkg_path = self.pkg_path
3380                 logger = self.logger
3381                 if "noclean" not in self.settings.features:
3382                         short_msg = "emerge: (%s of %s) %s Clean Post" % \
3383                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3384                         logger.log((" === (%s of %s) " + \
3385                                 "Post-Build Cleaning (%s::%s)") % \
3386                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path),
3387                                 short_msg=short_msg)
3388                 logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
3389                         (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
3390
3391 class PackageUninstall(AsynchronousTask):
3392
3393         __slots__ = ("ldpath_mtimes", "opts", "pkg", "scheduler", "settings")
3394
3395         def _start(self):
3396                 try:
3397                         unmerge(self.pkg.root_config, self.opts, "unmerge",
3398                                 [self.pkg.cpv], self.ldpath_mtimes, clean_world=0,
3399                                 clean_delay=0, raise_on_error=1, scheduler=self.scheduler,
3400                                 writemsg_level=self._writemsg_level)
3401                 except UninstallFailure, e:
3402                         self.returncode = e.status
3403                 else:
3404                         self.returncode = os.EX_OK
3405                 self.wait()
3406
3407         def _writemsg_level(self, msg, level=0, noiselevel=0):
3408
3409                 log_path = self.settings.get("PORTAGE_LOG_FILE")
3410                 background = self.background
3411
3412                 if log_path is None:
3413                         if not (background and level < logging.WARNING):
3414                                 portage.util.writemsg_level(msg,
3415                                         level=level, noiselevel=noiselevel)
3416                 else:
3417                         if not background:
3418                                 portage.util.writemsg_level(msg,
3419                                         level=level, noiselevel=noiselevel)
3420
3421                         f = open(log_path, 'a')
3422                         try:
3423                                 f.write(msg)
3424                         finally:
3425                                 f.close()
3426
3427 class Binpkg(CompositeTask):
3428
3429         __slots__ = ("find_blockers",
3430                 "ldpath_mtimes", "logger", "opts",
3431                 "pkg", "pkg_count", "prefetcher", "settings", "world_atom") + \
3432                 ("_bintree", "_build_dir", "_ebuild_path", "_fetched_pkg",
3433                 "_image_dir", "_infloc", "_pkg_path", "_tree", "_verify")
3434
3435         def _writemsg_level(self, msg, level=0, noiselevel=0):
3436
3437                 if not self.background:
3438                         portage.util.writemsg_level(msg,
3439                                 level=level, noiselevel=noiselevel)
3440
3441                 log_path = self.settings.get("PORTAGE_LOG_FILE")
3442                 if  log_path is not None:
3443                         f = open(log_path, 'a')
3444                         try:
3445                                 f.write(msg)
3446                         finally:
3447                                 f.close()
3448
3449         def _start(self):
3450
3451                 pkg = self.pkg
3452                 settings = self.settings
3453                 settings.setcpv(pkg)
3454                 self._tree = "bintree"
3455                 self._bintree = self.pkg.root_config.trees[self._tree]
3456                 self._verify = not self.opts.pretend
3457
3458                 dir_path = os.path.join(settings["PORTAGE_TMPDIR"],
3459                         "portage", pkg.category, pkg.pf)
3460                 self._build_dir = EbuildBuildDir(dir_path=dir_path,
3461                         pkg=pkg, settings=settings)
3462                 self._image_dir = os.path.join(dir_path, "image")
3463                 self._infloc = os.path.join(dir_path, "build-info")
3464                 self._ebuild_path = os.path.join(self._infloc, pkg.pf + ".ebuild")
3465                 settings["EBUILD"] = self._ebuild_path
3466                 debug = settings.get("PORTAGE_DEBUG") == "1"
3467                 portage.doebuild_environment(self._ebuild_path, "setup",
3468                         settings["ROOT"], settings, debug, 1, self._bintree.dbapi)
3469                 settings.configdict["pkg"]["EMERGE_FROM"] = pkg.type_name
3470
3471                 # The prefetcher has already completed or it
3472                 # could be running now. If it's running now,
3473                 # wait for it to complete since it holds
3474                 # a lock on the file being fetched. The
3475                 # portage.locks functions are only designed
3476                 # to work between separate processes. Since
3477                 # the lock is held by the current process,
3478                 # use the scheduler and fetcher methods to
3479                 # synchronize with the fetcher.
3480                 prefetcher = self.prefetcher
3481                 if prefetcher is None:
3482                         pass
3483                 elif not prefetcher.isAlive():
3484                         prefetcher.cancel()
3485                 elif prefetcher.poll() is None:
3486
3487                         waiting_msg = ("Fetching '%s' " + \
3488                                 "in the background. " + \
3489                                 "To view fetch progress, run `tail -f " + \
3490                                 "/var/log/emerge-fetch.log` in another " + \
3491                                 "terminal.") % prefetcher.pkg_path
3492                         msg_prefix = colorize("GOOD", " * ")
3493                         from textwrap import wrap
3494                         waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
3495                                 for line in wrap(waiting_msg, 65))
3496                         if not self.background:
3497                                 writemsg(waiting_msg, noiselevel=-1)
3498
3499                         self._current_task = prefetcher
3500                         prefetcher.addExitListener(self._prefetch_exit)
3501                         return
3502
3503                 self._prefetch_exit(prefetcher)
3504
3505         def _prefetch_exit(self, prefetcher):
3506
3507                 pkg = self.pkg
3508                 pkg_count = self.pkg_count
3509                 if not (self.opts.pretend or self.opts.fetchonly):
3510                         self._build_dir.lock()
3511                         # If necessary, discard old log so that we don't
3512                         # append to it.
3513                         self._build_dir.clean_log()
3514                         # Initialze PORTAGE_LOG_FILE.
3515                         portage.prepare_build_dirs(self.settings["ROOT"], self.settings, 1)
3516                 fetcher = BinpkgFetcher(background=self.background,
3517                         logfile=self.settings.get("PORTAGE_LOG_FILE"), pkg=self.pkg,
3518                         pretend=self.opts.pretend, scheduler=self.scheduler)
3519                 pkg_path = fetcher.pkg_path
3520                 self._pkg_path = pkg_path
3521
3522                 if self.opts.getbinpkg and self._bintree.isremote(pkg.cpv):
3523
3524                         msg = " --- (%s of %s) Fetching Binary (%s::%s)" %\
3525                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
3526                         short_msg = "emerge: (%s of %s) %s Fetch" % \
3527                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3528                         self.logger.log(msg, short_msg=short_msg)
3529                         self._start_task(fetcher, self._fetcher_exit)
3530                         return
3531
3532                 self._fetcher_exit(fetcher)
3533
3534         def _fetcher_exit(self, fetcher):
3535
3536                 # The fetcher only has a returncode when
3537                 # --getbinpkg is enabled.
3538                 if fetcher.returncode is not None:
3539                         self._fetched_pkg = True
3540                         if self._default_exit(fetcher) != os.EX_OK:
3541                                 self._unlock_builddir()
3542                                 self.wait()
3543                                 return
3544
3545                 if self.opts.pretend:
3546                         self._current_task = None
3547                         self.returncode = os.EX_OK
3548                         self.wait()
3549                         return
3550
3551                 verifier = None
3552                 if self._verify:
3553                         logfile = None
3554                         if self.background:
3555                                 logfile = self.settings.get("PORTAGE_LOG_FILE")
3556                         verifier = BinpkgVerifier(background=self.background,
3557                                 logfile=logfile, pkg=self.pkg)
3558                         self._start_task(verifier, self._verifier_exit)
3559                         return
3560
3561                 self._verifier_exit(verifier)
3562
3563         def _verifier_exit(self, verifier):
3564                 if verifier is not None and \
3565                         self._default_exit(verifier) != os.EX_OK:
3566                         self._unlock_builddir()
3567                         self.wait()
3568                         return
3569
3570                 logger = self.logger
3571                 pkg = self.pkg
3572                 pkg_count = self.pkg_count
3573                 pkg_path = self._pkg_path
3574
3575                 if self._fetched_pkg:
3576                         self._bintree.inject(pkg.cpv, filename=pkg_path)
3577
3578                 if self.opts.fetchonly:
3579                         self._current_task = None
3580                         self.returncode = os.EX_OK
3581                         self.wait()
3582                         return
3583
3584                 msg = " === (%s of %s) Merging Binary (%s::%s)" % \
3585                         (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
3586                 short_msg = "emerge: (%s of %s) %s Merge Binary" % \
3587                         (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3588                 logger.log(msg, short_msg=short_msg)
3589
3590                 phase = "clean"
3591                 settings = self.settings
3592                 ebuild_phase = EbuildPhase(background=self.background,
3593                         pkg=pkg, phase=phase, scheduler=self.scheduler,
3594                         settings=settings, tree=self._tree)
3595
3596                 self._start_task(ebuild_phase, self._clean_exit)
3597
3598         def _clean_exit(self, clean_phase):
3599                 if self._default_exit(clean_phase) != os.EX_OK:
3600                         self._unlock_builddir()
3601                         self.wait()
3602                         return
3603
3604                 dir_path = self._build_dir.dir_path
3605
3606                 infloc = self._infloc
3607                 pkg = self.pkg
3608                 pkg_path = self._pkg_path
3609
3610                 dir_mode = 0755
3611                 for mydir in (dir_path, self._image_dir, infloc):
3612                         portage.util.ensure_dirs(mydir, uid=portage.data.portage_uid,
3613                                 gid=portage.data.portage_gid, mode=dir_mode)
3614
3615                 # This initializes PORTAGE_LOG_FILE.
3616                 portage.prepare_build_dirs(self.settings["ROOT"], self.settings, 1)
3617                 self._writemsg_level(">>> Extracting info\n")
3618
3619                 pkg_xpak = portage.xpak.tbz2(self._pkg_path)
3620                 check_missing_metadata = ("CATEGORY", "PF")
3621                 missing_metadata = set()
3622                 for k in check_missing_metadata:
3623                         v = pkg_xpak.getfile(k)
3624                         if not v:
3625                                 missing_metadata.add(k)
3626
3627                 pkg_xpak.unpackinfo(infloc)
3628                 for k in missing_metadata:
3629                         if k == "CATEGORY":
3630                                 v = pkg.category
3631                         elif k == "PF":
3632                                 v = pkg.pf
3633                         else:
3634                                 continue
3635
3636                         f = open(os.path.join(infloc, k), 'wb')
3637                         try:
3638                                 f.write(v + "\n")
3639                         finally:
3640                                 f.close()
3641
3642                 # Store the md5sum in the vdb.
3643                 f = open(os.path.join(infloc, "BINPKGMD5"), "w")
3644                 try:
3645                         f.write(str(portage.checksum.perform_md5(pkg_path)) + "\n")
3646                 finally:
3647                         f.close()
3648
3649                 # This gives bashrc users an opportunity to do various things
3650                 # such as remove binary packages after they're installed.
3651                 settings = self.settings
3652                 settings.setcpv(self.pkg)
3653                 settings["PORTAGE_BINPKG_FILE"] = pkg_path
3654                 settings.backup_changes("PORTAGE_BINPKG_FILE")
3655
3656                 phase = "setup"
3657                 setup_phase = EbuildPhase(background=self.background,
3658                         pkg=self.pkg, phase=phase, scheduler=self.scheduler,
3659                         settings=settings, tree=self._tree)
3660
3661                 setup_phase.addExitListener(self._setup_exit)
3662                 self._current_task = setup_phase
3663                 self.scheduler.scheduleSetup(setup_phase)
3664
3665         def _setup_exit(self, setup_phase):
3666                 if self._default_exit(setup_phase) != os.EX_OK:
3667                         self._unlock_builddir()
3668                         self.wait()
3669                         return
3670
3671                 extractor = BinpkgExtractorAsync(background=self.background,
3672                         image_dir=self._image_dir,
3673                         pkg=self.pkg, pkg_path=self._pkg_path, scheduler=self.scheduler)
3674                 self._writemsg_level(">>> Extracting %s\n" % self.pkg.cpv)
3675                 self._start_task(extractor, self._extractor_exit)
3676
3677         def _extractor_exit(self, extractor):
3678                 if self._final_exit(extractor) != os.EX_OK:
3679                         self._unlock_builddir()
3680                         writemsg("!!! Error Extracting '%s'\n" % self._pkg_path,
3681                                 noiselevel=-1)
3682                 self.wait()
3683
3684         def _unlock_builddir(self):
3685                 if self.opts.pretend or self.opts.fetchonly:
3686                         return
3687                 portage.elog.elog_process(self.pkg.cpv, self.settings)
3688                 self._build_dir.unlock()
3689
3690         def install(self):
3691
3692                 # This gives bashrc users an opportunity to do various things
3693                 # such as remove binary packages after they're installed.
3694                 settings = self.settings
3695                 settings["PORTAGE_BINPKG_FILE"] = self._pkg_path
3696                 settings.backup_changes("PORTAGE_BINPKG_FILE")
3697
3698                 merge = EbuildMerge(find_blockers=self.find_blockers,
3699                         ldpath_mtimes=self.ldpath_mtimes, logger=self.logger,
3700                         pkg=self.pkg, pkg_count=self.pkg_count,
3701                         pkg_path=self._pkg_path, scheduler=self.scheduler,
3702                         settings=settings, tree=self._tree, world_atom=self.world_atom)
3703
3704                 try:
3705                         retval = merge.execute()
3706                 finally:
3707                         settings.pop("PORTAGE_BINPKG_FILE", None)
3708                         self._unlock_builddir()
3709                 return retval
3710
3711 class BinpkgFetcher(SpawnProcess):
3712
3713         __slots__ = ("pkg", "pretend",
3714                 "locked", "pkg_path", "_lock_obj")
3715
3716         def __init__(self, **kwargs):
3717                 SpawnProcess.__init__(self, **kwargs)
3718                 pkg = self.pkg
3719                 self.pkg_path = pkg.root_config.trees["bintree"].getname(pkg.cpv)
3720
3721         def _start(self):
3722
3723                 if self.cancelled:
3724                         return
3725
3726                 pkg = self.pkg
3727                 pretend = self.pretend
3728                 bintree = pkg.root_config.trees["bintree"]
3729                 settings = bintree.settings
3730                 use_locks = "distlocks" in settings.features
3731                 pkg_path = self.pkg_path
3732
3733                 if not pretend:
3734                         portage.util.ensure_dirs(os.path.dirname(pkg_path))
3735                         if use_locks:
3736                                 self.lock()
3737                 exists = os.path.exists(pkg_path)
3738                 resume = exists and os.path.basename(pkg_path) in bintree.invalids
3739                 if not (pretend or resume):
3740                         # Remove existing file or broken symlink.
3741                         try:
3742                                 os.unlink(pkg_path)
3743                         except OSError:
3744                                 pass
3745
3746                 # urljoin doesn't work correctly with
3747                 # unrecognized protocols like sftp
3748                 if bintree._remote_has_index:
3749                         rel_uri = bintree._remotepkgs[pkg.cpv].get("PATH")
3750                         if not rel_uri:
3751                                 rel_uri = pkg.cpv + ".tbz2"
3752                         uri = bintree._remote_base_uri.rstrip("/") + \
3753                                 "/" + rel_uri.lstrip("/")
3754                 else:
3755                         uri = settings["PORTAGE_BINHOST"].rstrip("/") + \
3756                                 "/" + pkg.pf + ".tbz2"
3757
3758                 if pretend:
3759                         portage.writemsg_stdout("\n%s\n" % uri, noiselevel=-1)
3760                         self.returncode = os.EX_OK
3761                         self.wait()
3762                         return
3763
3764                 protocol = urlparse.urlparse(uri)[0]
3765                 fcmd_prefix = "FETCHCOMMAND"
3766                 if resume:
3767                         fcmd_prefix = "RESUMECOMMAND"
3768                 fcmd = settings.get(fcmd_prefix + "_" + protocol.upper())
3769                 if not fcmd:
3770                         fcmd = settings.get(fcmd_prefix)
3771
3772                 fcmd_vars = {
3773                         "DISTDIR" : os.path.dirname(pkg_path),
3774                         "URI"     : uri,
3775                         "FILE"    : os.path.basename(pkg_path)
3776                 }
3777
3778                 fetch_env = dict(settings.iteritems())
3779                 fetch_args = [portage.util.varexpand(x, mydict=fcmd_vars) \
3780                         for x in shlex.split(fcmd)]
3781
3782                 if self.fd_pipes is None:
3783                         self.fd_pipes = {}
3784                 fd_pipes = self.fd_pipes
3785
3786                 # Redirect all output to stdout since some fetchers like
3787                 # wget pollute stderr (if portage detects a problem then it
3788                 # can send it's own message to stderr).
3789                 fd_pipes.setdefault(0, sys.stdin.fileno())
3790                 fd_pipes.setdefault(1, sys.stdout.fileno())
3791                 fd_pipes.setdefault(2, sys.stdout.fileno())
3792
3793                 self.args = fetch_args
3794                 self.env = fetch_env
3795                 SpawnProcess._start(self)
3796
3797         def _set_returncode(self, wait_retval):
3798                 SpawnProcess._set_returncode(self, wait_retval)
3799                 if self.returncode == os.EX_OK:
3800                         # If possible, update the mtime to match the remote package if
3801                         # the fetcher didn't already do it automatically.
3802                         bintree = self.pkg.root_config.trees["bintree"]
3803                         if bintree._remote_has_index:
3804                                 remote_mtime = bintree._remotepkgs[self.pkg.cpv].get("MTIME")
3805                                 if remote_mtime is not None:
3806                                         try:
3807                                                 remote_mtime = long(remote_mtime)
3808                                         except ValueError:
3809                                                 pass
3810                                         else:
3811                                                 try:
3812                                                         local_mtime = long(os.stat(self.pkg_path).st_mtime)
3813                                                 except OSError:
3814                                                         pass
3815                                                 else:
3816                                                         if remote_mtime != local_mtime:
3817                                                                 try:
3818                                                                         os.utime(self.pkg_path,
3819                                                                                 (remote_mtime, remote_mtime))
3820                                                                 except OSError:
3821                                                                         pass
3822
3823                 if self.locked:
3824                         self.unlock()
3825
3826         def lock(self):
3827                 """
3828                 This raises an AlreadyLocked exception if lock() is called
3829                 while a lock is already held. In order to avoid this, call
3830                 unlock() or check whether the "locked" attribute is True
3831                 or False before calling lock().
3832                 """
3833                 if self._lock_obj is not None:
3834                         raise self.AlreadyLocked((self._lock_obj,))
3835
3836                 self._lock_obj = portage.locks.lockfile(
3837                         self.pkg_path, wantnewlockfile=1)
3838                 self.locked = True
3839
3840         class AlreadyLocked(portage.exception.PortageException):
3841                 pass
3842
3843         def unlock(self):
3844                 if self._lock_obj is None:
3845                         return
3846                 portage.locks.unlockfile(self._lock_obj)
3847                 self._lock_obj = None
3848                 self.locked = False
3849
3850 class BinpkgVerifier(AsynchronousTask):
3851         __slots__ = ("logfile", "pkg",)
3852
3853         def _start(self):
3854                 """
3855                 Note: Unlike a normal AsynchronousTask.start() method,
3856                 this one does all work is synchronously. The returncode
3857                 attribute will be set before it returns.
3858                 """
3859
3860                 pkg = self.pkg
3861                 root_config = pkg.root_config
3862                 bintree = root_config.trees["bintree"]
3863                 rval = os.EX_OK
3864                 stdout_orig = sys.stdout
3865                 stderr_orig = sys.stderr
3866                 log_file = None
3867                 if self.background and self.logfile is not None:
3868                         log_file = open(self.logfile, 'a')
3869                 try:
3870                         if log_file is not None:
3871                                 sys.stdout = log_file
3872                                 sys.stderr = log_file
3873                         try:
3874                                 bintree.digestCheck(pkg)
3875                         except portage.exception.FileNotFound:
3876                                 writemsg("!!! Fetching Binary failed " + \
3877                                         "for '%s'\n" % pkg.cpv, noiselevel=-1)
3878                                 rval = 1
3879                         except portage.exception.DigestException, e:
3880                                 writemsg("\n!!! Digest verification failed:\n",
3881                                         noiselevel=-1)
3882                                 writemsg("!!! %s\n" % e.value[0],
3883                                         noiselevel=-1)
3884                                 writemsg("!!! Reason: %s\n" % e.value[1],
3885                                         noiselevel=-1)
3886                                 writemsg("!!! Got: %s\n" % e.value[2],
3887                                         noiselevel=-1)
3888                                 writemsg("!!! Expected: %s\n" % e.value[3],
3889                                         noiselevel=-1)
3890                                 rval = 1
3891                         if rval != os.EX_OK:
3892                                 pkg_path = bintree.getname(pkg.cpv)
3893                                 head, tail = os.path.split(pkg_path)
3894                                 temp_filename = portage._checksum_failure_temp_file(head, tail)
3895                                 writemsg("File renamed to '%s'\n" % (temp_filename,),
3896                                         noiselevel=-1)
3897                 finally:
3898                         sys.stdout = stdout_orig
3899                         sys.stderr = stderr_orig
3900                         if log_file is not None:
3901                                 log_file.close()
3902
3903                 self.returncode = rval
3904                 self.wait()
3905
3906 class BinpkgPrefetcher(CompositeTask):
3907
3908         __slots__ = ("pkg",) + \
3909                 ("pkg_path", "_bintree",)
3910
3911         def _start(self):
3912                 self._bintree = self.pkg.root_config.trees["bintree"]
3913                 fetcher = BinpkgFetcher(background=self.background,
3914                         logfile=self.scheduler.fetch.log_file, pkg=self.pkg,
3915                         scheduler=self.scheduler)
3916                 self.pkg_path = fetcher.pkg_path
3917                 self._start_task(fetcher, self._fetcher_exit)
3918
3919         def _fetcher_exit(self, fetcher):
3920
3921                 if self._default_exit(fetcher) != os.EX_OK:
3922                         self.wait()
3923                         return
3924
3925                 verifier = BinpkgVerifier(background=self.background,
3926                         logfile=self.scheduler.fetch.log_file, pkg=self.pkg)
3927                 self._start_task(verifier, self._verifier_exit)
3928
3929         def _verifier_exit(self, verifier):
3930                 if self._default_exit(verifier) != os.EX_OK:
3931                         self.wait()
3932                         return
3933
3934                 self._bintree.inject(self.pkg.cpv, filename=self.pkg_path)
3935
3936                 self._current_task = None
3937                 self.returncode = os.EX_OK
3938                 self.wait()
3939
3940 class BinpkgExtractorAsync(SpawnProcess):
3941
3942         __slots__ = ("image_dir", "pkg", "pkg_path")
3943
3944         _shell_binary = portage.const.BASH_BINARY
3945
3946         def _start(self):
3947                 self.args = [self._shell_binary, "-c",
3948                         "bzip2 -dqc -- %s | tar -xp -C %s -f -" % \
3949                         (portage._shell_quote(self.pkg_path),
3950                         portage._shell_quote(self.image_dir))]
3951
3952                 self.env = self.pkg.root_config.settings.environ()
3953                 SpawnProcess._start(self)
3954
3955 class MergeListItem(CompositeTask):
3956
3957         """
3958         TODO: For parallel scheduling, everything here needs asynchronous
3959         execution support (start, poll, and wait methods).
3960         """
3961
3962         __slots__ = ("args_set",
3963                 "binpkg_opts", "build_opts", "config_pool", "emerge_opts",
3964                 "find_blockers", "logger", "mtimedb", "pkg",
3965                 "pkg_count", "pkg_to_replace", "prefetcher",
3966                 "settings", "statusMessage", "world_atom") + \
3967                 ("_install_task",)
3968
3969         def _start(self):
3970
3971                 pkg = self.pkg
3972                 build_opts = self.build_opts
3973
3974                 if pkg.installed:
3975                         # uninstall,  executed by self.merge()
3976                         self.returncode = os.EX_OK
3977                         self.wait()
3978                         return
3979
3980                 args_set = self.args_set
3981                 find_blockers = self.find_blockers
3982                 logger = self.logger
3983                 mtimedb = self.mtimedb
3984                 pkg_count = self.pkg_count
3985                 scheduler = self.scheduler
3986                 settings = self.settings
3987                 world_atom = self.world_atom
3988                 ldpath_mtimes = mtimedb["ldpath"]
3989
3990                 action_desc = "Emerging"
3991                 preposition = "for"
3992                 if pkg.type_name == "binary":
3993                         action_desc += " binary"
3994
3995                 if build_opts.fetchonly:
3996                         action_desc = "Fetching"
3997
3998                 msg = "%s (%s of %s) %s" % \
3999                         (action_desc,
4000                         colorize("MERGE_LIST_PROGRESS", str(pkg_count.curval)),
4001                         colorize("MERGE_LIST_PROGRESS", str(pkg_count.maxval)),
4002                         colorize("GOOD", pkg.cpv))
4003
4004                 portdb = pkg.root_config.trees["porttree"].dbapi
4005                 portdir_repo_name = portdb._repository_map.get(portdb.porttree_root)
4006                 if portdir_repo_name:
4007                         pkg_repo_name = pkg.metadata.get("repository")
4008                         if pkg_repo_name != portdir_repo_name:
4009                                 if not pkg_repo_name:
4010                                         pkg_repo_name = "unknown repo"
4011                                 msg += " from %s" % pkg_repo_name
4012
4013                 if pkg.root != "/":
4014                         msg += " %s %s" % (preposition, pkg.root)
4015
4016                 if not build_opts.pretend:
4017                         self.statusMessage(msg)
4018                         logger.log(" >>> emerge (%s of %s) %s to %s" % \
4019                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
4020
4021                 if pkg.type_name == "ebuild":
4022
4023                         build = EbuildBuild(args_set=args_set,
4024                                 background=self.background,
4025                                 config_pool=self.config_pool,
4026                                 find_blockers=find_blockers,
4027                                 ldpath_mtimes=ldpath_mtimes, logger=logger,
4028                                 opts=build_opts, pkg=pkg, pkg_count=pkg_count,
4029                                 prefetcher=self.prefetcher, scheduler=scheduler,
4030                                 settings=settings, world_atom=world_atom)
4031
4032                         self._install_task = build
4033                         self._start_task(build, self._default_final_exit)
4034                         return
4035
4036                 elif pkg.type_name == "binary":
4037
4038                         binpkg = Binpkg(background=self.background,
4039                                 find_blockers=find_blockers,
4040                                 ldpath_mtimes=ldpath_mtimes, logger=logger,
4041                                 opts=self.binpkg_opts, pkg=pkg, pkg_count=pkg_count,
4042                                 prefetcher=self.prefetcher, settings=settings,
4043                                 scheduler=scheduler, world_atom=world_atom)
4044
4045                         self._install_task = binpkg
4046                         self._start_task(binpkg, self._default_final_exit)
4047                         return
4048
4049         def _poll(self):
4050                 self._install_task.poll()
4051                 return self.returncode
4052
4053         def _wait(self):
4054                 self._install_task.wait()
4055                 return self.returncode
4056
4057         def merge(self):
4058
4059                 pkg = self.pkg
4060                 build_opts = self.build_opts
4061                 find_blockers = self.find_blockers
4062                 logger = self.logger
4063                 mtimedb = self.mtimedb
4064                 pkg_count = self.pkg_count
4065                 prefetcher = self.prefetcher
4066                 scheduler = self.scheduler
4067                 settings = self.settings
4068                 world_atom = self.world_atom
4069                 ldpath_mtimes = mtimedb["ldpath"]
4070
4071                 if pkg.installed:
4072                         if not (build_opts.buildpkgonly or \
4073                                 build_opts.fetchonly or build_opts.pretend):
4074
4075                                 uninstall = PackageUninstall(background=self.background,
4076                                         ldpath_mtimes=ldpath_mtimes, opts=self.emerge_opts,
4077                                         pkg=pkg, scheduler=scheduler, settings=settings)
4078
4079                                 uninstall.start()
4080                                 retval = uninstall.wait()
4081                                 if retval != os.EX_OK:
4082                                         return retval
4083                         return os.EX_OK
4084
4085                 if build_opts.fetchonly or \
4086                         build_opts.buildpkgonly:
4087                         return self.returncode
4088
4089                 retval = self._install_task.install()
4090                 return retval
4091
4092 class PackageMerge(AsynchronousTask):
4093         """
4094         TODO: Implement asynchronous merge so that the scheduler can
4095         run while a merge is executing.
4096         """
4097
4098         __slots__ = ("merge",)
4099
4100         def _start(self):
4101
4102                 pkg = self.merge.pkg
4103                 pkg_count = self.merge.pkg_count
4104
4105                 if pkg.installed:
4106                         action_desc = "Uninstalling"
4107                         preposition = "from"
4108                         counter_str = ""
4109                 else:
4110                         action_desc = "Installing"
4111                         preposition = "to"
4112                         counter_str = "(%s of %s) " % \
4113                                 (colorize("MERGE_LIST_PROGRESS", str(pkg_count.curval)),
4114                                 colorize("MERGE_LIST_PROGRESS", str(pkg_count.maxval)))
4115
4116                 msg = "%s %s%s" % \
4117                         (action_desc,
4118                         counter_str,
4119                         colorize("GOOD", pkg.cpv))
4120
4121                 if pkg.root != "/":
4122                         msg += " %s %s" % (preposition, pkg.root)
4123
4124                 if not self.merge.build_opts.fetchonly and \
4125                         not self.merge.build_opts.pretend and \
4126                         not self.merge.build_opts.buildpkgonly:
4127                         self.merge.statusMessage(msg)
4128
4129                 self.returncode = self.merge.merge()
4130                 self.wait()
4131
4132 class DependencyArg(object):
4133         def __init__(self, arg=None, root_config=None):
4134                 self.arg = arg
4135                 self.root_config = root_config
4136
4137         def __str__(self):
4138                 return str(self.arg)
4139
4140 class AtomArg(DependencyArg):
4141         def __init__(self, atom=None, **kwargs):
4142                 DependencyArg.__init__(self, **kwargs)
4143                 self.atom = atom
4144                 if not isinstance(self.atom, portage.dep.Atom):
4145                         self.atom = portage.dep.Atom(self.atom)
4146                 self.set = (self.atom, )
4147
4148 class PackageArg(DependencyArg):
4149         def __init__(self, package=None, **kwargs):
4150                 DependencyArg.__init__(self, **kwargs)
4151                 self.package = package
4152                 self.atom = portage.dep.Atom("=" + package.cpv)
4153                 self.set = (self.atom, )
4154
4155 class SetArg(DependencyArg):
4156         def __init__(self, set=None, **kwargs):
4157                 DependencyArg.__init__(self, **kwargs)
4158                 self.set = set
4159                 self.name = self.arg[len(SETPREFIX):]
4160
4161 class Dependency(SlotObject):
4162         __slots__ = ("atom", "blocker", "depth",
4163                 "parent", "onlydeps", "priority", "root")
4164         def __init__(self, **kwargs):
4165                 SlotObject.__init__(self, **kwargs)
4166                 if self.priority is None:
4167                         self.priority = DepPriority()
4168                 if self.depth is None:
4169                         self.depth = 0
4170
4171 class BlockerCache(portage.cache.mappings.MutableMapping):
4172         """This caches blockers of installed packages so that dep_check does not
4173         have to be done for every single installed package on every invocation of
4174         emerge.  The cache is invalidated whenever it is detected that something
4175         has changed that might alter the results of dep_check() calls:
4176                 1) the set of installed packages (including COUNTER) has changed
4177                 2) the old-style virtuals have changed
4178         """
4179
4180         # Number of uncached packages to trigger cache update, since
4181         # it's wasteful to update it for every vdb change.
4182         _cache_threshold = 5
4183
4184         class BlockerData(object):
4185
4186                 __slots__ = ("__weakref__", "atoms", "counter")
4187
4188                 def __init__(self, counter, atoms):
4189                         self.counter = counter
4190                         self.atoms = atoms
4191
4192         def __init__(self, myroot, vardb):
4193                 self._vardb = vardb
4194                 self._virtuals = vardb.settings.getvirtuals()
4195                 self._cache_filename = os.path.join(myroot,
4196                         portage.CACHE_PATH.lstrip(os.path.sep), "vdb_blockers.pickle")
4197                 self._cache_version = "1"
4198                 self._cache_data = None
4199                 self._modified = set()
4200                 self._load()
4201
4202         def _load(self):
4203                 try:
4204                         f = open(self._cache_filename, mode='rb')
4205                         mypickle = pickle.Unpickler(f)
4206                         try:
4207                                 mypickle.find_global = None
4208                         except AttributeError:
4209                                 # TODO: If py3k, override Unpickler.find_class().
4210                                 pass
4211                         self._cache_data = mypickle.load()
4212                         f.close()
4213                         del f
4214                 except (IOError, OSError, EOFError, ValueError, pickle.UnpicklingError), e:
4215                         if isinstance(e, pickle.UnpicklingError):
4216                                 writemsg("!!! Error loading '%s': %s\n" % \
4217                                         (self._cache_filename, str(e)), noiselevel=-1)
4218                         del e
4219
4220                 cache_valid = self._cache_data and \
4221                         isinstance(self._cache_data, dict) and \
4222                         self._cache_data.get("version") == self._cache_version and \
4223                         isinstance(self._cache_data.get("blockers"), dict)
4224                 if cache_valid:
4225                         # Validate all the atoms and counters so that
4226                         # corruption is detected as soon as possible.
4227                         invalid_items = set()
4228                         for k, v in self._cache_data["blockers"].iteritems():
4229                                 if not isinstance(k, basestring):
4230                                         invalid_items.add(k)
4231                                         continue
4232                                 try:
4233                                         if portage.catpkgsplit(k) is None:
4234                                                 invalid_items.add(k)
4235                                                 continue
4236                                 except portage.exception.InvalidData:
4237                                         invalid_items.add(k)
4238                                         continue
4239                                 if not isinstance(v, tuple) or \
4240                                         len(v) != 2:
4241                                         invalid_items.add(k)
4242                                         continue
4243                                 counter, atoms = v
4244                                 if not isinstance(counter, (int, long)):
4245                                         invalid_items.add(k)
4246                                         continue
4247                                 if not isinstance(atoms, (list, tuple)):
4248                                         invalid_items.add(k)
4249                                         continue
4250                                 invalid_atom = False
4251                                 for atom in atoms:
4252                                         if not isinstance(atom, basestring):
4253                                                 invalid_atom = True
4254                                                 break
4255                                         if atom[:1] != "!" or \
4256                                                 not portage.isvalidatom(
4257                                                 atom, allow_blockers=True):
4258                                                 invalid_atom = True
4259                                                 break
4260                                 if invalid_atom:
4261                                         invalid_items.add(k)
4262                                         continue
4263
4264                         for k in invalid_items:
4265                                 del self._cache_data["blockers"][k]
4266                         if not self._cache_data["blockers"]:
4267                                 cache_valid = False
4268
4269                 if not cache_valid:
4270                         self._cache_data = {"version":self._cache_version}
4271                         self._cache_data["blockers"] = {}
4272                         self._cache_data["virtuals"] = self._virtuals
4273                 self._modified.clear()
4274
4275         def flush(self):
4276                 """If the current user has permission and the internal blocker cache
4277                 been updated, save it to disk and mark it unmodified.  This is called
4278                 by emerge after it has proccessed blockers for all installed packages.
4279                 Currently, the cache is only written if the user has superuser
4280                 privileges (since that's required to obtain a lock), but all users
4281                 have read access and benefit from faster blocker lookups (as long as
4282                 the entire cache is still valid).  The cache is stored as a pickled
4283                 dict object with the following format:
4284
4285                 {
4286                         version : "1",
4287                         "blockers" : {cpv1:(counter,(atom1, atom2...)), cpv2...},
4288                         "virtuals" : vardb.settings.getvirtuals()
4289                 }
4290                 """
4291                 if len(self._modified) >= self._cache_threshold and \
4292                         secpass >= 2:
4293                         try:
4294                                 f = portage.util.atomic_ofstream(self._cache_filename, mode='wb')
4295                                 pickle.dump(self._cache_data, f, protocol=2)
4296                                 f.close()
4297                                 portage.util.apply_secpass_permissions(
4298                                         self._cache_filename, gid=portage.portage_gid, mode=0644)
4299                         except (IOError, OSError), e:
4300                                 pass
4301                         self._modified.clear()
4302
4303         def __setitem__(self, cpv, blocker_data):
4304                 """
4305                 Update the cache and mark it as modified for a future call to
4306                 self.flush().
4307
4308                 @param cpv: Package for which to cache blockers.
4309                 @type cpv: String
4310                 @param blocker_data: An object with counter and atoms attributes.
4311                 @type blocker_data: BlockerData
4312                 """
4313                 self._cache_data["blockers"][cpv] = \
4314                         (blocker_data.counter, tuple(str(x) for x in blocker_data.atoms))
4315                 self._modified.add(cpv)
4316
4317         def __iter__(self):
4318                 if self._cache_data is None:
4319                         # triggered by python-trace
4320                         return iter([])
4321                 return iter(self._cache_data["blockers"])
4322
4323         def __delitem__(self, cpv):
4324                 del self._cache_data["blockers"][cpv]
4325
4326         def __getitem__(self, cpv):
4327                 """
4328                 @rtype: BlockerData
4329                 @returns: An object with counter and atoms attributes.
4330                 """
4331                 return self.BlockerData(*self._cache_data["blockers"][cpv])
4332
4333 class BlockerDB(object):
4334
4335         def __init__(self, root_config):
4336                 self._root_config = root_config
4337                 self._vartree = root_config.trees["vartree"]
4338                 self._portdb = root_config.trees["porttree"].dbapi
4339
4340                 self._dep_check_trees = None
4341                 self._fake_vartree = None
4342
4343         def _get_fake_vartree(self, acquire_lock=0):
4344                 fake_vartree = self._fake_vartree
4345                 if fake_vartree is None:
4346                         fake_vartree = FakeVartree(self._root_config,
4347                                 acquire_lock=acquire_lock)
4348                         self._fake_vartree = fake_vartree
4349                         self._dep_check_trees = { self._vartree.root : {
4350                                 "porttree"    :  fake_vartree,
4351                                 "vartree"     :  fake_vartree,
4352                         }}
4353                 else:
4354                         fake_vartree.sync(acquire_lock=acquire_lock)
4355                 return fake_vartree
4356
4357         def findInstalledBlockers(self, new_pkg, acquire_lock=0):
4358                 blocker_cache = BlockerCache(self._vartree.root, self._vartree.dbapi)
4359                 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
4360                 settings = self._vartree.settings
4361                 stale_cache = set(blocker_cache)
4362                 fake_vartree = self._get_fake_vartree(acquire_lock=acquire_lock)
4363                 dep_check_trees = self._dep_check_trees
4364                 vardb = fake_vartree.dbapi
4365                 installed_pkgs = list(vardb)
4366
4367                 for inst_pkg in installed_pkgs:
4368                         stale_cache.discard(inst_pkg.cpv)
4369                         cached_blockers = blocker_cache.get(inst_pkg.cpv)
4370                         if cached_blockers is not None and \
4371                                 cached_blockers.counter != long(inst_pkg.metadata["COUNTER"]):
4372                                 cached_blockers = None
4373                         if cached_blockers is not None:
4374                                 blocker_atoms = cached_blockers.atoms
4375                         else:
4376                                 # Use aux_get() to trigger FakeVartree global
4377                                 # updates on *DEPEND when appropriate.
4378                                 depstr = " ".join(vardb.aux_get(inst_pkg.cpv, dep_keys))
4379                                 try:
4380                                         portage.dep._dep_check_strict = False
4381                                         success, atoms = portage.dep_check(depstr,
4382                                                 vardb, settings, myuse=inst_pkg.use.enabled,
4383                                                 trees=dep_check_trees, myroot=inst_pkg.root)
4384                                 finally:
4385                                         portage.dep._dep_check_strict = True
4386                                 if not success:
4387                                         pkg_location = os.path.join(inst_pkg.root,
4388                                                 portage.VDB_PATH, inst_pkg.category, inst_pkg.pf)
4389                                         portage.writemsg("!!! %s/*DEPEND: %s\n" % \
4390                                                 (pkg_location, atoms), noiselevel=-1)
4391                                         continue
4392
4393                                 blocker_atoms = [atom for atom in atoms \
4394                                         if atom.startswith("!")]
4395                                 blocker_atoms.sort()
4396                                 counter = long(inst_pkg.metadata["COUNTER"])
4397                                 blocker_cache[inst_pkg.cpv] = \
4398                                         blocker_cache.BlockerData(counter, blocker_atoms)
4399                 for cpv in stale_cache:
4400                         del blocker_cache[cpv]
4401                 blocker_cache.flush()
4402
4403                 blocker_parents = digraph()
4404                 blocker_atoms = []
4405                 for pkg in installed_pkgs:
4406                         for blocker_atom in blocker_cache[pkg.cpv].atoms:
4407                                 blocker_atom = blocker_atom.lstrip("!")
4408                                 blocker_atoms.append(blocker_atom)
4409                                 blocker_parents.add(blocker_atom, pkg)
4410
4411                 blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
4412                 blocking_pkgs = set()
4413                 for atom in blocker_atoms.iterAtomsForPackage(new_pkg):
4414                         blocking_pkgs.update(blocker_parents.parent_nodes(atom))
4415
4416                 # Check for blockers in the other direction.
4417                 depstr = " ".join(new_pkg.metadata[k] for k in dep_keys)
4418                 try:
4419                         portage.dep._dep_check_strict = False
4420                         success, atoms = portage.dep_check(depstr,
4421                                 vardb, settings, myuse=new_pkg.use.enabled,
4422                                 trees=dep_check_trees, myroot=new_pkg.root)
4423                 finally:
4424                         portage.dep._dep_check_strict = True
4425                 if not success:
4426                         # We should never get this far with invalid deps.
4427                         show_invalid_depstring_notice(new_pkg, depstr, atoms)
4428                         assert False
4429
4430                 blocker_atoms = [atom.lstrip("!") for atom in atoms \
4431                         if atom[:1] == "!"]
4432                 if blocker_atoms:
4433                         blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
4434                         for inst_pkg in installed_pkgs:
4435                                 try:
4436                                         blocker_atoms.iterAtomsForPackage(inst_pkg).next()
4437                                 except (portage.exception.InvalidDependString, StopIteration):
4438                                         continue
4439                                 blocking_pkgs.add(inst_pkg)
4440
4441                 return blocking_pkgs
4442
4443 def show_invalid_depstring_notice(parent_node, depstring, error_msg):
4444
4445         msg1 = "\n\n!!! Invalid or corrupt dependency specification: " + \
4446                 "\n\n%s\n\n%s\n\n%s\n\n" % (error_msg, parent_node, depstring)
4447         p_type, p_root, p_key, p_status = parent_node
4448         msg = []
4449         if p_status == "nomerge":
4450                 category, pf = portage.catsplit(p_key)
4451                 pkg_location = os.path.join(p_root, portage.VDB_PATH, category, pf)
4452                 msg.append("Portage is unable to process the dependencies of the ")
4453                 msg.append("'%s' package. " % p_key)
4454                 msg.append("In order to correct this problem, the package ")
4455                 msg.append("should be uninstalled, reinstalled, or upgraded. ")
4456                 msg.append("As a temporary workaround, the --nodeps option can ")
4457                 msg.append("be used to ignore all dependencies.  For reference, ")
4458                 msg.append("the problematic dependencies can be found in the ")
4459                 msg.append("*DEPEND files located in '%s/'." % pkg_location)
4460         else:
4461                 msg.append("This package can not be installed. ")
4462                 msg.append("Please notify the '%s' package maintainer " % p_key)
4463                 msg.append("about this problem.")
4464
4465         msg2 = "".join("%s\n" % line for line in textwrap.wrap("".join(msg), 72))
4466         writemsg_level(msg1 + msg2, level=logging.ERROR, noiselevel=-1)
4467
4468 class PackageVirtualDbapi(portage.dbapi):
4469         """
4470         A dbapi-like interface class that represents the state of the installed
4471         package database as new packages are installed, replacing any packages
4472         that previously existed in the same slot. The main difference between
4473         this class and fakedbapi is that this one uses Package instances
4474         internally (passed in via cpv_inject() and cpv_remove() calls).
4475         """
4476         def __init__(self, settings):
4477                 portage.dbapi.__init__(self)
4478                 self.settings = settings
4479                 self._match_cache = {}
4480                 self._cp_map = {}
4481                 self._cpv_map = {}
4482
4483         def clear(self):
4484                 """
4485                 Remove all packages.
4486                 """
4487                 if self._cpv_map:
4488                         self._clear_cache()
4489                         self._cp_map.clear()
4490                         self._cpv_map.clear()
4491
4492         def copy(self):
4493                 obj = PackageVirtualDbapi(self.settings)
4494                 obj._match_cache = self._match_cache.copy()
4495                 obj._cp_map = self._cp_map.copy()
4496                 for k, v in obj._cp_map.iteritems():
4497                         obj._cp_map[k] = v[:]
4498                 obj._cpv_map = self._cpv_map.copy()
4499                 return obj
4500
4501         def __iter__(self):
4502                 return self._cpv_map.itervalues()
4503
4504         def __contains__(self, item):
4505                 existing = self._cpv_map.get(item.cpv)
4506                 if existing is not None and \
4507                         existing == item:
4508                         return True
4509                 return False
4510
4511         def get(self, item, default=None):
4512                 cpv = getattr(item, "cpv", None)
4513                 if cpv is None:
4514                         if len(item) != 4:
4515                                 return default
4516                         type_name, root, cpv, operation = item
4517
4518                 existing = self._cpv_map.get(cpv)
4519                 if existing is not None and \
4520                         existing == item:
4521                         return existing
4522                 return default
4523
4524         def match_pkgs(self, atom):
4525                 return [self._cpv_map[cpv] for cpv in self.match(atom)]
4526
4527         def _clear_cache(self):
4528                 if self._categories is not None:
4529                         self._categories = None
4530                 if self._match_cache:
4531                         self._match_cache = {}
4532
4533         def match(self, origdep, use_cache=1):
4534                 result = self._match_cache.get(origdep)
4535                 if result is not None:
4536                         return result[:]
4537                 result = portage.dbapi.match(self, origdep, use_cache=use_cache)
4538                 self._match_cache[origdep] = result
4539                 return result[:]
4540
4541         def cpv_exists(self, cpv):
4542                 return cpv in self._cpv_map
4543
4544         def cp_list(self, mycp, use_cache=1):
4545                 cachelist = self._match_cache.get(mycp)
4546                 # cp_list() doesn't expand old-style virtuals
4547                 if cachelist and cachelist[0].startswith(mycp):
4548                         return cachelist[:]
4549                 cpv_list = self._cp_map.get(mycp)
4550                 if cpv_list is None:
4551                         cpv_list = []
4552                 else:
4553                         cpv_list = [pkg.cpv for pkg in cpv_list]
4554                 self._cpv_sort_ascending(cpv_list)
4555                 if not (not cpv_list and mycp.startswith("virtual/")):
4556                         self._match_cache[mycp] = cpv_list
4557                 return cpv_list[:]
4558
4559         def cp_all(self):
4560                 return list(self._cp_map)
4561
4562         def cpv_all(self):
4563                 return list(self._cpv_map)
4564
4565         def cpv_inject(self, pkg):
4566                 cp_list = self._cp_map.get(pkg.cp)
4567                 if cp_list is None:
4568                         cp_list = []
4569                         self._cp_map[pkg.cp] = cp_list
4570                 e_pkg = self._cpv_map.get(pkg.cpv)
4571                 if e_pkg is not None:
4572                         if e_pkg == pkg:
4573                                 return
4574                         self.cpv_remove(e_pkg)
4575                 for e_pkg in cp_list:
4576                         if e_pkg.slot_atom == pkg.slot_atom:
4577                                 if e_pkg == pkg:
4578                                         return
4579                                 self.cpv_remove(e_pkg)
4580                                 break
4581                 cp_list.append(pkg)
4582                 self._cpv_map[pkg.cpv] = pkg
4583                 self._clear_cache()
4584
4585         def cpv_remove(self, pkg):
4586                 old_pkg = self._cpv_map.get(pkg.cpv)
4587                 if old_pkg != pkg:
4588                         raise KeyError(pkg)
4589                 self._cp_map[pkg.cp].remove(pkg)
4590                 del self._cpv_map[pkg.cpv]
4591                 self._clear_cache()
4592
4593         def aux_get(self, cpv, wants):
4594                 metadata = self._cpv_map[cpv].metadata
4595                 return [metadata.get(x, "") for x in wants]
4596
4597         def aux_update(self, cpv, values):
4598                 self._cpv_map[cpv].metadata.update(values)
4599                 self._clear_cache()
4600
4601 class depgraph(object):
4602
4603         pkg_tree_map = RootConfig.pkg_tree_map
4604
4605         _dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
4606
4607         def __init__(self, settings, trees, myopts, myparams, spinner):
4608                 self.settings = settings
4609                 self.target_root = settings["ROOT"]
4610                 self.myopts = myopts
4611                 self.myparams = myparams
4612                 self.edebug = 0
4613                 if settings.get("PORTAGE_DEBUG", "") == "1":
4614                         self.edebug = 1
4615                 self.spinner = spinner
4616                 self._running_root = trees["/"]["root_config"]
4617                 self._opts_no_restart = Scheduler._opts_no_restart
4618                 self.pkgsettings = {}
4619                 # Maps slot atom to package for each Package added to the graph.
4620                 self._slot_pkg_map = {}
4621                 # Maps nodes to the reasons they were selected for reinstallation.
4622                 self._reinstall_nodes = {}
4623                 self.mydbapi = {}
4624                 self.trees = {}
4625                 self._trees_orig = trees
4626                 self.roots = {}
4627                 # Contains a filtered view of preferred packages that are selected
4628                 # from available repositories.
4629                 self._filtered_trees = {}
4630                 # Contains installed packages and new packages that have been added
4631                 # to the graph.
4632                 self._graph_trees = {}
4633                 # All Package instances
4634                 self._pkg_cache = {}
4635                 for myroot in trees:
4636                         self.trees[myroot] = {}
4637                         # Create a RootConfig instance that references
4638                         # the FakeVartree instead of the real one.
4639                         self.roots[myroot] = RootConfig(
4640                                 trees[myroot]["vartree"].settings,
4641                                 self.trees[myroot],
4642                                 trees[myroot]["root_config"].setconfig)
4643                         for tree in ("porttree", "bintree"):
4644                                 self.trees[myroot][tree] = trees[myroot][tree]
4645                         self.trees[myroot]["vartree"] = \
4646                                 FakeVartree(trees[myroot]["root_config"],
4647                                         pkg_cache=self._pkg_cache)
4648                         self.pkgsettings[myroot] = portage.config(
4649                                 clone=self.trees[myroot]["vartree"].settings)
4650                         self._slot_pkg_map[myroot] = {}
4651                         vardb = self.trees[myroot]["vartree"].dbapi
4652                         preload_installed_pkgs = "--nodeps" not in self.myopts and \
4653                                 "--buildpkgonly" not in self.myopts
4654                         # This fakedbapi instance will model the state that the vdb will
4655                         # have after new packages have been installed.
4656                         fakedb = PackageVirtualDbapi(vardb.settings)
4657                         if preload_installed_pkgs:
4658                                 for pkg in vardb:
4659                                         self.spinner.update()
4660                                         # This triggers metadata updates via FakeVartree.
4661                                         vardb.aux_get(pkg.cpv, [])
4662                                         fakedb.cpv_inject(pkg)
4663
4664                         # Now that the vardb state is cached in our FakeVartree,
4665                         # we won't be needing the real vartree cache for awhile.
4666                         # To make some room on the heap, clear the vardbapi
4667                         # caches.
4668                         trees[myroot]["vartree"].dbapi._clear_cache()
4669                         gc.collect()
4670
4671                         self.mydbapi[myroot] = fakedb
4672                         def graph_tree():
4673                                 pass
4674                         graph_tree.dbapi = fakedb
4675                         self._graph_trees[myroot] = {}
4676                         self._filtered_trees[myroot] = {}
4677                         # Substitute the graph tree for the vartree in dep_check() since we
4678                         # want atom selections to be consistent with package selections
4679                         # have already been made.
4680                         self._graph_trees[myroot]["porttree"]   = graph_tree
4681                         self._graph_trees[myroot]["vartree"]    = graph_tree
4682                         def filtered_tree():
4683                                 pass
4684                         filtered_tree.dbapi = self._dep_check_composite_db(self, myroot)
4685                         self._filtered_trees[myroot]["porttree"] = filtered_tree
4686
4687                         # Passing in graph_tree as the vartree here could lead to better
4688                         # atom selections in some cases by causing atoms for packages that
4689                         # have been added to the graph to be preferred over other choices.
4690                         # However, it can trigger atom selections that result in
4691                         # unresolvable direct circular dependencies. For example, this
4692                         # happens with gwydion-dylan which depends on either itself or
4693                         # gwydion-dylan-bin. In case gwydion-dylan is not yet installed,
4694                         # gwydion-dylan-bin needs to be selected in order to avoid a
4695                         # an unresolvable direct circular dependency.
4696                         #
4697                         # To solve the problem described above, pass in "graph_db" so that
4698                         # packages that have been added to the graph are distinguishable
4699                         # from other available packages and installed packages. Also, pass
4700                         # the parent package into self._select_atoms() calls so that
4701                         # unresolvable direct circular dependencies can be detected and
4702                         # avoided when possible.
4703                         self._filtered_trees[myroot]["graph_db"] = graph_tree.dbapi
4704                         self._filtered_trees[myroot]["vartree"] = self.trees[myroot]["vartree"]
4705
4706                         dbs = []
4707                         portdb = self.trees[myroot]["porttree"].dbapi
4708                         bindb  = self.trees[myroot]["bintree"].dbapi
4709                         vardb  = self.trees[myroot]["vartree"].dbapi
4710                         #               (db, pkg_type, built, installed, db_keys)
4711                         if "--usepkgonly" not in self.myopts:
4712                                 db_keys = list(portdb._aux_cache_keys)
4713                                 dbs.append((portdb, "ebuild", False, False, db_keys))
4714                         if "--usepkg" in self.myopts:
4715                                 db_keys = list(bindb._aux_cache_keys)
4716                                 dbs.append((bindb,  "binary", True, False, db_keys))
4717                         db_keys = list(trees[myroot]["vartree"].dbapi._aux_cache_keys)
4718                         dbs.append((vardb, "installed", True, True, db_keys))
4719                         self._filtered_trees[myroot]["dbs"] = dbs
4720                         if "--usepkg" in self.myopts:
4721                                 self.trees[myroot]["bintree"].populate(
4722                                         "--getbinpkg" in self.myopts,
4723                                         "--getbinpkgonly" in self.myopts)
4724                 del trees
4725
4726                 self.digraph=portage.digraph()
4727                 # contains all sets added to the graph
4728                 self._sets = {}
4729                 # contains atoms given as arguments
4730                 self._sets["args"] = InternalPackageSet()
4731                 # contains all atoms from all sets added to the graph, including
4732                 # atoms given as arguments
4733                 self._set_atoms = InternalPackageSet()
4734                 self._atom_arg_map = {}
4735                 # contains all nodes pulled in by self._set_atoms
4736                 self._set_nodes = set()
4737                 # Contains only Blocker -> Uninstall edges
4738                 self._blocker_uninstalls = digraph()
4739                 # Contains only Package -> Blocker edges
4740                 self._blocker_parents = digraph()
4741                 # Contains only irrelevant Package -> Blocker edges
4742                 self._irrelevant_blockers = digraph()
4743                 # Contains only unsolvable Package -> Blocker edges
4744                 self._unsolvable_blockers = digraph()
4745                 # Contains all Blocker -> Blocked Package edges
4746                 self._blocked_pkgs = digraph()
4747                 # Contains world packages that have been protected from
4748                 # uninstallation but may not have been added to the graph
4749                 # if the graph is not complete yet.
4750                 self._blocked_world_pkgs = {}
4751                 self._slot_collision_info = {}
4752                 # Slot collision nodes are not allowed to block other packages since
4753                 # blocker validation is only able to account for one package per slot.
4754                 self._slot_collision_nodes = set()
4755                 self._parent_atoms = {}
4756                 self._slot_conflict_parent_atoms = set()
4757                 self._serialized_tasks_cache = None
4758                 self._scheduler_graph = None
4759                 self._displayed_list = None
4760                 self._pprovided_args = []
4761                 self._missing_args = []
4762                 self._masked_installed = set()
4763                 self._unsatisfied_deps_for_display = []
4764                 self._unsatisfied_blockers_for_display = None
4765                 self._circular_deps_for_display = None
4766                 self._dep_stack = []
4767                 self._unsatisfied_deps = []
4768                 self._initially_unsatisfied_deps = []
4769                 self._ignored_deps = []
4770                 self._required_set_names = set(["system", "world"])
4771                 self._select_atoms = self._select_atoms_highest_available
4772                 self._select_package = self._select_pkg_highest_available
4773                 self._highest_pkg_cache = {}
4774
4775         def _show_slot_collision_notice(self):
4776                 """Show an informational message advising the user to mask one of the
4777                 the packages. In some cases it may be possible to resolve this
4778                 automatically, but support for backtracking (removal nodes that have
4779                 already been selected) will be required in order to handle all possible
4780                 cases.
4781                 """
4782
4783                 if not self._slot_collision_info:
4784                         return
4785
4786                 self._show_merge_list()
4787
4788                 msg = []
4789                 msg.append("\n!!! Multiple package instances within a single " + \
4790                         "package slot have been pulled\n")
4791                 msg.append("!!! into the dependency graph, resulting" + \
4792                         " in a slot conflict:\n\n")
4793                 indent = "  "
4794                 # Max number of parents shown, to avoid flooding the display.
4795                 max_parents = 3
4796                 explanation_columns = 70
4797                 explanations = 0
4798                 for (slot_atom, root), slot_nodes \
4799                         in self._slot_collision_info.iteritems():
4800                         msg.append(str(slot_atom))
4801                         msg.append("\n\n")
4802
4803                         for node in slot_nodes:
4804                                 msg.append(indent)
4805                                 msg.append(str(node))
4806                                 parent_atoms = self._parent_atoms.get(node)
4807                                 if parent_atoms:
4808                                         pruned_list = set()
4809                                         # Prefer conflict atoms over others.
4810                                         for parent_atom in parent_atoms:
4811                                                 if len(pruned_list) >= max_parents:
4812                                                         break
4813                                                 if parent_atom in self._slot_conflict_parent_atoms:
4814                                                         pruned_list.add(parent_atom)
4815
4816                                         # If this package was pulled in by conflict atoms then
4817                                         # show those alone since those are the most interesting.
4818                                         if not pruned_list:
4819                                                 # When generating the pruned list, prefer instances
4820                                                 # of DependencyArg over instances of Package.
4821                                                 for parent_atom in parent_atoms:
4822                                                         if len(pruned_list) >= max_parents:
4823                                                                 break
4824                                                         parent, atom = parent_atom
4825                                                         if isinstance(parent, DependencyArg):
4826                                                                 pruned_list.add(parent_atom)
4827                                                 # Prefer Packages instances that themselves have been
4828                                                 # pulled into collision slots.
4829                                                 for parent_atom in parent_atoms:
4830                                                         if len(pruned_list) >= max_parents:
4831                                                                 break
4832                                                         parent, atom = parent_atom
4833                                                         if isinstance(parent, Package) and \
4834                                                                 (parent.slot_atom, parent.root) \
4835                                                                 in self._slot_collision_info:
4836                                                                 pruned_list.add(parent_atom)
4837                                                 for parent_atom in parent_atoms:
4838                                                         if len(pruned_list) >= max_parents:
4839                                                                 break
4840                                                         pruned_list.add(parent_atom)
4841                                         omitted_parents = len(parent_atoms) - len(pruned_list)
4842                                         parent_atoms = pruned_list
4843                                         msg.append(" pulled in by\n")
4844                                         for parent_atom in parent_atoms:
4845                                                 parent, atom = parent_atom
4846                                                 msg.append(2*indent)
4847                                                 if isinstance(parent,
4848                                                         (PackageArg, AtomArg)):
4849                                                         # For PackageArg and AtomArg types, it's
4850                                                         # redundant to display the atom attribute.
4851                                                         msg.append(str(parent))
4852                                                 else:
4853                                                         # Display the specific atom from SetArg or
4854                                                         # Package types.
4855                                                         msg.append("%s required by %s" % (atom, parent))
4856                                                 msg.append("\n")
4857                                         if omitted_parents:
4858                                                 msg.append(2*indent)
4859                                                 msg.append("(and %d more)\n" % omitted_parents)
4860                                 else:
4861                                         msg.append(" (no parents)\n")
4862                                 msg.append("\n")
4863                         explanation = self._slot_conflict_explanation(slot_nodes)
4864                         if explanation:
4865                                 explanations += 1
4866                                 msg.append(indent + "Explanation:\n\n")
4867                                 for line in textwrap.wrap(explanation, explanation_columns):
4868                                         msg.append(2*indent + line + "\n")
4869                                 msg.append("\n")
4870                 msg.append("\n")
4871                 sys.stderr.write("".join(msg))
4872                 sys.stderr.flush()
4873
4874                 explanations_for_all = explanations == len(self._slot_collision_info)
4875
4876                 if explanations_for_all or "--quiet" in self.myopts:
4877                         return
4878
4879                 msg = []
4880                 msg.append("It may be possible to solve this problem ")
4881                 msg.append("by using package.mask to prevent one of ")
4882                 msg.append("those packages from being selected. ")
4883                 msg.append("However, it is also possible that conflicting ")
4884                 msg.append("dependencies exist such that they are impossible to ")
4885                 msg.append("satisfy simultaneously.  If such a conflict exists in ")
4886                 msg.append("the dependencies of two different packages, then those ")
4887                 msg.append("packages can not be installed simultaneously.")
4888
4889                 from formatter import AbstractFormatter, DumbWriter
4890                 f = AbstractFormatter(DumbWriter(sys.stderr, maxcol=72))
4891                 for x in msg:
4892                         f.add_flowing_data(x)
4893                 f.end_paragraph(1)
4894
4895                 msg = []
4896                 msg.append("For more information, see MASKED PACKAGES ")
4897                 msg.append("section in the emerge man page or refer ")
4898                 msg.append("to the Gentoo Handbook.")
4899                 for x in msg:
4900                         f.add_flowing_data(x)
4901                 f.end_paragraph(1)
4902                 f.writer.flush()
4903
4904         def _slot_conflict_explanation(self, slot_nodes):
4905                 """
4906                 When a slot conflict occurs due to USE deps, there are a few
4907                 different cases to consider:
4908
4909                 1) New USE are correctly set but --newuse wasn't requested so an
4910                    installed package with incorrect USE happened to get pulled
4911                    into graph before the new one.
4912
4913                 2) New USE are incorrectly set but an installed package has correct
4914                    USE so it got pulled into the graph, and a new instance also got
4915                    pulled in due to --newuse or an upgrade.
4916
4917                 3) Multiple USE deps exist that can't be satisfied simultaneously,
4918                    and multiple package instances got pulled into the same slot to
4919                    satisfy the conflicting deps.
4920
4921                 Currently, explanations and suggested courses of action are generated
4922                 for cases 1 and 2. Case 3 is too complex to give a useful suggestion.
4923                 """
4924
4925                 if len(slot_nodes) != 2:
4926                         # Suggestions are only implemented for
4927                         # conflicts between two packages.
4928                         return None
4929
4930                 all_conflict_atoms = self._slot_conflict_parent_atoms
4931                 matched_node = None
4932                 matched_atoms = None
4933                 unmatched_node = None
4934                 for node in slot_nodes:
4935                         parent_atoms = self._parent_atoms.get(node)
4936                         if not parent_atoms:
4937                                 # Normally, there are always parent atoms. If there are
4938                                 # none then something unexpected is happening and there's
4939                                 # currently no suggestion for this case.
4940                                 return None
4941                         conflict_atoms = all_conflict_atoms.intersection(parent_atoms)
4942                         for parent_atom in conflict_atoms:
4943                                 parent, atom = parent_atom
4944                                 if not atom.use:
4945                                         # Suggestions are currently only implemented for cases
4946                                         # in which all conflict atoms have USE deps.
4947                                         return None
4948                         if conflict_atoms:
4949                                 if matched_node is not None:
4950                                         # If conflict atoms match multiple nodes
4951                                         # then there's no suggestion.
4952                                         return None
4953                                 matched_node = node
4954                                 matched_atoms = conflict_atoms
4955                         else:
4956                                 if unmatched_node is not None:
4957                                         # Neither node is matched by conflict atoms, and
4958                                         # there is no suggestion for this case.
4959                                         return None
4960                                 unmatched_node = node
4961
4962                 if matched_node is None or unmatched_node is None:
4963                         # This shouldn't happen.
4964                         return None
4965
4966                 if unmatched_node.installed and not matched_node.installed and \
4967                         unmatched_node.cpv == matched_node.cpv:
4968                         # If the conflicting packages are the same version then
4969                         # --newuse should be all that's needed. If they are different
4970                         # versions then there's some other problem.
4971                         return "New USE are correctly set, but --newuse wasn't" + \
4972                                 " requested, so an installed package with incorrect USE " + \
4973                                 "happened to get pulled into the dependency graph. " + \
4974                                 "In order to solve " + \
4975                                 "this, either specify the --newuse option or explicitly " + \
4976                                 " reinstall '%s'." % matched_node.slot_atom
4977
4978                 if matched_node.installed and not unmatched_node.installed:
4979                         atoms = sorted(set(atom for parent, atom in matched_atoms))
4980                         explanation = ("New USE for '%s' are incorrectly set. " + \
4981                                 "In order to solve this, adjust USE to satisfy '%s'") % \
4982                                 (matched_node.slot_atom, atoms[0])
4983                         if len(atoms) > 1:
4984                                 for atom in atoms[1:-1]:
4985                                         explanation += ", '%s'" % (atom,)
4986                                 if len(atoms) > 2:
4987                                         explanation += ","
4988                                 explanation += " and '%s'" % (atoms[-1],)
4989                         explanation += "."
4990                         return explanation
4991
4992                 return None
4993
4994         def _process_slot_conflicts(self):
4995                 """
4996                 Process slot conflict data to identify specific atoms which
4997                 lead to conflict. These atoms only match a subset of the
4998                 packages that have been pulled into a given slot.
4999                 """
5000                 for (slot_atom, root), slot_nodes \
5001                         in self._slot_collision_info.iteritems():
5002
5003                         all_parent_atoms = set()
5004                         for pkg in slot_nodes:
5005                                 parent_atoms = self._parent_atoms.get(pkg)
5006                                 if not parent_atoms:
5007                                         continue
5008                                 all_parent_atoms.update(parent_atoms)
5009
5010                         for pkg in slot_nodes:
5011                                 parent_atoms = self._parent_atoms.get(pkg)
5012                                 if parent_atoms is None:
5013                                         parent_atoms = set()
5014                                         self._parent_atoms[pkg] = parent_atoms
5015                                 for parent_atom in all_parent_atoms:
5016                                         if parent_atom in parent_atoms:
5017                                                 continue
5018                                         # Use package set for matching since it will match via
5019                                         # PROVIDE when necessary, while match_from_list does not.
5020                                         parent, atom = parent_atom
5021                                         atom_set = InternalPackageSet(
5022                                                 initial_atoms=(atom,))
5023                                         if atom_set.findAtomForPackage(pkg):
5024                                                 parent_atoms.add(parent_atom)
5025                                         else:
5026                                                 self._slot_conflict_parent_atoms.add(parent_atom)
5027
5028         def _reinstall_for_flags(self, forced_flags,
5029                 orig_use, orig_iuse, cur_use, cur_iuse):
5030                 """Return a set of flags that trigger reinstallation, or None if there
5031                 are no such flags."""
5032                 if "--newuse" in self.myopts:
5033                         flags = set(orig_iuse.symmetric_difference(
5034                                 cur_iuse).difference(forced_flags))
5035                         flags.update(orig_iuse.intersection(orig_use).symmetric_difference(
5036                                 cur_iuse.intersection(cur_use)))
5037                         if flags:
5038                                 return flags
5039                 elif "changed-use" == self.myopts.get("--reinstall"):
5040                         flags = orig_iuse.intersection(orig_use).symmetric_difference(
5041                                 cur_iuse.intersection(cur_use))
5042                         if flags:
5043                                 return flags
5044                 return None
5045
5046         def _create_graph(self, allow_unsatisfied=False):
5047                 dep_stack = self._dep_stack
5048                 while dep_stack:
5049                         self.spinner.update()
5050                         dep = dep_stack.pop()
5051                         if isinstance(dep, Package):
5052                                 if not self._add_pkg_deps(dep,
5053                                         allow_unsatisfied=allow_unsatisfied):
5054                                         return 0
5055                                 continue
5056                         if not self._add_dep(dep, allow_unsatisfied=allow_unsatisfied):
5057                                 return 0
5058                 return 1
5059
5060         def _add_dep(self, dep, allow_unsatisfied=False):
5061                 debug = "--debug" in self.myopts
5062                 buildpkgonly = "--buildpkgonly" in self.myopts
5063                 nodeps = "--nodeps" in self.myopts
5064                 empty = "empty" in self.myparams
5065                 deep = "deep" in self.myparams
5066                 update = "--update" in self.myopts and dep.depth <= 1
5067                 if dep.blocker:
5068                         if not buildpkgonly and \
5069                                 not nodeps and \
5070                                 dep.parent not in self._slot_collision_nodes:
5071                                 if dep.parent.onlydeps:
5072                                         # It's safe to ignore blockers if the
5073                                         # parent is an --onlydeps node.
5074                                         return 1
5075                                 # The blocker applies to the root where
5076                                 # the parent is or will be installed.
5077                                 blocker = Blocker(atom=dep.atom,
5078                                         eapi=dep.parent.metadata["EAPI"],
5079                                         root=dep.parent.root)
5080                                 self._blocker_parents.add(blocker, dep.parent)
5081                         return 1
5082                 dep_pkg, existing_node = self._select_package(dep.root, dep.atom,
5083                         onlydeps=dep.onlydeps)
5084                 if not dep_pkg:
5085                         if dep.priority.optional:
5086                                 # This could be an unecessary build-time dep
5087                                 # pulled in by --with-bdeps=y.
5088                                 return 1
5089                         if allow_unsatisfied:
5090                                 self._unsatisfied_deps.append(dep)
5091                                 return 1
5092                         self._unsatisfied_deps_for_display.append(
5093                                 ((dep.root, dep.atom), {"myparent":dep.parent}))
5094                         return 0
5095                 # In some cases, dep_check will return deps that shouldn't
5096                 # be proccessed any further, so they are identified and
5097                 # discarded here. Try to discard as few as possible since
5098                 # discarded dependencies reduce the amount of information
5099                 # available for optimization of merge order.
5100                 if dep.priority.satisfied and \
5101                         not dep_pkg.installed and \
5102                         not (existing_node or empty or deep or update):
5103                         myarg = None
5104                         if dep.root == self.target_root:
5105                                 try:
5106                                         myarg = self._iter_atoms_for_pkg(dep_pkg).next()
5107                                 except StopIteration:
5108                                         pass
5109                                 except portage.exception.InvalidDependString:
5110                                         if not dep_pkg.installed:
5111                                                 # This shouldn't happen since the package
5112                                                 # should have been masked.
5113                                                 raise
5114                         if not myarg:
5115                                 self._ignored_deps.append(dep)
5116                                 return 1
5117
5118                 if not self._add_pkg(dep_pkg, dep):
5119                         return 0
5120                 return 1
5121
5122         def _add_pkg(self, pkg, dep):
5123                 myparent = None
5124                 priority = None
5125                 depth = 0
5126                 if dep is None:
5127                         dep = Dependency()
5128                 else:
5129                         myparent = dep.parent
5130                         priority = dep.priority
5131                         depth = dep.depth
5132                 if priority is None:
5133                         priority = DepPriority()
5134                 """
5135                 Fills the digraph with nodes comprised of packages to merge.
5136                 mybigkey is the package spec of the package to merge.
5137                 myparent is the package depending on mybigkey ( or None )
5138                 addme = Should we add this package to the digraph or are we just looking at it's deps?
5139                         Think --onlydeps, we need to ignore packages in that case.
5140                 #stuff to add:
5141                 #SLOT-aware emerge
5142                 #IUSE-aware emerge -> USE DEP aware depgraph
5143                 #"no downgrade" emerge
5144                 """
5145                 # Ensure that the dependencies of the same package
5146                 # are never processed more than once.
5147                 previously_added = pkg in self.digraph
5148
5149                 # select the correct /var database that we'll be checking against
5150                 vardbapi = self.trees[pkg.root]["vartree"].dbapi
5151                 pkgsettings = self.pkgsettings[pkg.root]
5152
5153                 arg_atoms = None
5154                 if True:
5155                         try:
5156                                 arg_atoms = list(self._iter_atoms_for_pkg(pkg))
5157                         except portage.exception.InvalidDependString, e:
5158                                 if not pkg.installed:
5159                                         show_invalid_depstring_notice(
5160                                                 pkg, pkg.metadata["PROVIDE"], str(e))
5161                                         return 0
5162                                 del e
5163
5164                 if not pkg.onlydeps:
5165                         if not pkg.installed and \
5166                                 "empty" not in self.myparams and \
5167                                 vardbapi.match(pkg.slot_atom):
5168                                 # Increase the priority of dependencies on packages that
5169                                 # are being rebuilt. This optimizes merge order so that
5170                                 # dependencies are rebuilt/updated as soon as possible,
5171                                 # which is needed especially when emerge is called by
5172                                 # revdep-rebuild since dependencies may be affected by ABI
5173                                 # breakage that has rendered them useless. Don't adjust
5174                                 # priority here when in "empty" mode since all packages
5175                                 # are being merged in that case.
5176                                 priority.rebuild = True
5177
5178                         existing_node = self._slot_pkg_map[pkg.root].get(pkg.slot_atom)
5179                         slot_collision = False
5180                         if existing_node:
5181                                 existing_node_matches = pkg.cpv == existing_node.cpv
5182                                 if existing_node_matches and \
5183                                         pkg != existing_node and \
5184                                         dep.atom is not None:
5185                                         # Use package set for matching since it will match via
5186                                         # PROVIDE when necessary, while match_from_list does not.
5187                                         atom_set = InternalPackageSet(initial_atoms=[dep.atom])
5188                                         if not atom_set.findAtomForPackage(existing_node):
5189                                                 existing_node_matches = False
5190                                 if existing_node_matches:
5191                                         # The existing node can be reused.
5192                                         if arg_atoms:
5193                                                 for parent_atom in arg_atoms:
5194                                                         parent, atom = parent_atom
5195                                                         self.digraph.add(existing_node, parent,
5196                                                                 priority=priority)
5197                                                         self._add_parent_atom(existing_node, parent_atom)
5198                                         # If a direct circular dependency is not an unsatisfied
5199                                         # buildtime dependency then drop it here since otherwise
5200                                         # it can skew the merge order calculation in an unwanted
5201                                         # way.
5202                                         if existing_node != myparent or \
5203                                                 (priority.buildtime and not priority.satisfied):
5204                                                 self.digraph.addnode(existing_node, myparent,
5205                                                         priority=priority)
5206                                                 if dep.atom is not None and dep.parent is not None:
5207                                                         self._add_parent_atom(existing_node,
5208                                                                 (dep.parent, dep.atom))
5209                                         return 1
5210                                 else:
5211
5212                                         # A slot collision has occurred.  Sometimes this coincides
5213                                         # with unresolvable blockers, so the slot collision will be
5214                                         # shown later if there are no unresolvable blockers.
5215                                         self._add_slot_conflict(pkg)
5216                                         slot_collision = True
5217
5218                         if slot_collision:
5219                                 # Now add this node to the graph so that self.display()
5220                                 # can show use flags and --tree portage.output.  This node is
5221                                 # only being partially added to the graph.  It must not be
5222                                 # allowed to interfere with the other nodes that have been
5223                                 # added.  Do not overwrite data for existing nodes in
5224                                 # self.mydbapi since that data will be used for blocker
5225                                 # validation.
5226                                 # Even though the graph is now invalid, continue to process
5227                                 # dependencies so that things like --fetchonly can still
5228                                 # function despite collisions.
5229                                 pass
5230                         elif not previously_added:
5231                                 self._slot_pkg_map[pkg.root][pkg.slot_atom] = pkg
5232                                 self.mydbapi[pkg.root].cpv_inject(pkg)
5233                                 self._filtered_trees[pkg.root]["porttree"].dbapi._clear_cache()
5234
5235                         if not pkg.installed:
5236                                 # Allow this package to satisfy old-style virtuals in case it
5237                                 # doesn't already. Any pre-existing providers will be preferred
5238                                 # over this one.
5239                                 try:
5240                                         pkgsettings.setinst(pkg.cpv, pkg.metadata)
5241                                         # For consistency, also update the global virtuals.
5242                                         settings = self.roots[pkg.root].settings
5243                                         settings.unlock()
5244                                         settings.setinst(pkg.cpv, pkg.metadata)
5245                                         settings.lock()
5246                                 except portage.exception.InvalidDependString, e:
5247                                         show_invalid_depstring_notice(
5248                                                 pkg, pkg.metadata["PROVIDE"], str(e))
5249                                         del e
5250                                         return 0
5251
5252                 if arg_atoms:
5253                         self._set_nodes.add(pkg)
5254
5255                 # Do this even when addme is False (--onlydeps) so that the
5256                 # parent/child relationship is always known in case
5257                 # self._show_slot_collision_notice() needs to be called later.
5258                 self.digraph.add(pkg, myparent, priority=priority)
5259                 if dep.atom is not None and dep.parent is not None:
5260                         self._add_parent_atom(pkg, (dep.parent, dep.atom))
5261
5262                 if arg_atoms:
5263                         for parent_atom in arg_atoms:
5264                                 parent, atom = parent_atom
5265                                 self.digraph.add(pkg, parent, priority=priority)
5266                                 self._add_parent_atom(pkg, parent_atom)
5267
5268                 """ This section determines whether we go deeper into dependencies or not.
5269                     We want to go deeper on a few occasions:
5270                     Installing package A, we need to make sure package A's deps are met.
5271                     emerge --deep <pkgspec>; we need to recursively check dependencies of pkgspec
5272                     If we are in --nodeps (no recursion) mode, we obviously only check 1 level of dependencies.
5273                 """
5274                 dep_stack = self._dep_stack
5275                 if "recurse" not in self.myparams:
5276                         return 1
5277                 elif pkg.installed and \
5278                         "deep" not in self.myparams:
5279                         dep_stack = self._ignored_deps
5280
5281                 self.spinner.update()
5282
5283                 if arg_atoms:
5284                         depth = 0
5285                 pkg.depth = depth
5286                 if not previously_added:
5287                         dep_stack.append(pkg)
5288                 return 1
5289
5290         def _add_parent_atom(self, pkg, parent_atom):
5291                 parent_atoms = self._parent_atoms.get(pkg)
5292                 if parent_atoms is None:
5293                         parent_atoms = set()
5294                         self._parent_atoms[pkg] = parent_atoms
5295                 parent_atoms.add(parent_atom)
5296
5297         def _add_slot_conflict(self, pkg):
5298                 self._slot_collision_nodes.add(pkg)
5299                 slot_key = (pkg.slot_atom, pkg.root)
5300                 slot_nodes = self._slot_collision_info.get(slot_key)
5301                 if slot_nodes is None:
5302                         slot_nodes = set()
5303                         slot_nodes.add(self._slot_pkg_map[pkg.root][pkg.slot_atom])
5304                         self._slot_collision_info[slot_key] = slot_nodes
5305                 slot_nodes.add(pkg)
5306
5307         def _add_pkg_deps(self, pkg, allow_unsatisfied=False):
5308
5309                 mytype = pkg.type_name
5310                 myroot = pkg.root
5311                 mykey = pkg.cpv
5312                 metadata = pkg.metadata
5313                 myuse = pkg.use.enabled
5314                 jbigkey = pkg
5315                 depth = pkg.depth + 1
5316                 removal_action = "remove" in self.myparams
5317
5318                 edepend={}
5319                 depkeys = ["DEPEND","RDEPEND","PDEPEND"]
5320                 for k in depkeys:
5321                         edepend[k] = metadata[k]
5322
5323                 if not pkg.built and \
5324                         "--buildpkgonly" in self.myopts and \
5325                         "deep" not in self.myparams and \
5326                         "empty" not in self.myparams:
5327                         edepend["RDEPEND"] = ""
5328                         edepend["PDEPEND"] = ""
5329                 bdeps_optional = False
5330
5331                 if pkg.built and not removal_action:
5332                         if self.myopts.get("--with-bdeps", "n") == "y":
5333                                 # Pull in build time deps as requested, but marked them as
5334                                 # "optional" since they are not strictly required. This allows
5335                                 # more freedom in the merge order calculation for solving
5336                                 # circular dependencies. Don't convert to PDEPEND since that
5337                                 # could make --with-bdeps=y less effective if it is used to
5338                                 # adjust merge order to prevent built_with_use() calls from
5339                                 # failing.
5340                                 bdeps_optional = True
5341                         else:
5342                                 # built packages do not have build time dependencies.
5343                                 edepend["DEPEND"] = ""
5344
5345                 if removal_action and self.myopts.get("--with-bdeps", "y") == "n":
5346                         edepend["DEPEND"] = ""
5347
5348                 bdeps_root = "/"
5349                 root_deps = self.myopts.get("--root-deps")
5350                 if root_deps is not None:
5351                         if root_deps is True:
5352                                 bdeps_root = myroot
5353                         elif root_deps == "rdeps":
5354                                 edepend["DEPEND"] = ""
5355
5356                 deps = (
5357                         (bdeps_root, edepend["DEPEND"],
5358                                 self._priority(buildtime=(not bdeps_optional),
5359                                 optional=bdeps_optional)),
5360                         (myroot, edepend["RDEPEND"], self._priority(runtime=True)),
5361                         (myroot, edepend["PDEPEND"], self._priority(runtime_post=True))
5362                 )
5363
5364                 debug = "--debug" in self.myopts
5365                 strict = mytype != "installed"
5366                 try:
5367                         for dep_root, dep_string, dep_priority in deps:
5368                                 if not dep_string:
5369                                         continue
5370                                 if debug:
5371                                         print
5372                                         print "Parent:   ", jbigkey
5373                                         print "Depstring:", dep_string
5374                                         print "Priority:", dep_priority
5375                                 vardb = self.roots[dep_root].trees["vartree"].dbapi
5376                                 try:
5377                                         selected_atoms = self._select_atoms(dep_root,
5378                                                 dep_string, myuse=myuse, parent=pkg, strict=strict,
5379                                                 priority=dep_priority)
5380                                 except portage.exception.InvalidDependString, e:
5381                                         show_invalid_depstring_notice(jbigkey, dep_string, str(e))
5382                                         return 0
5383                                 if debug:
5384                                         print "Candidates:", selected_atoms
5385
5386                                 for atom in selected_atoms:
5387                                         try:
5388
5389                                                 atom = portage.dep.Atom(atom)
5390
5391                                                 mypriority = dep_priority.copy()
5392                                                 if not atom.blocker and vardb.match(atom):
5393                                                         mypriority.satisfied = True
5394
5395                                                 if not self._add_dep(Dependency(atom=atom,
5396                                                         blocker=atom.blocker, depth=depth, parent=pkg,
5397                                                         priority=mypriority, root=dep_root),
5398                                                         allow_unsatisfied=allow_unsatisfied):
5399                                                         return 0
5400
5401                                         except portage.exception.InvalidAtom, e:
5402                                                 show_invalid_depstring_notice(
5403                                                         pkg, dep_string, str(e))
5404                                                 del e
5405                                                 if not pkg.installed:
5406                                                         return 0
5407
5408                                 if debug:
5409                                         print "Exiting...", jbigkey
5410                 except portage.exception.AmbiguousPackageName, e:
5411                         pkgs = e.args[0]
5412                         portage.writemsg("\n\n!!! An atom in the dependencies " + \
5413                                 "is not fully-qualified. Multiple matches:\n\n", noiselevel=-1)
5414                         for cpv in pkgs:
5415                                 portage.writemsg("    %s\n" % cpv, noiselevel=-1)
5416                         portage.writemsg("\n", noiselevel=-1)
5417                         if mytype == "binary":
5418                                 portage.writemsg(
5419                                         "!!! This binary package cannot be installed: '%s'\n" % \
5420                                         mykey, noiselevel=-1)
5421                         elif mytype == "ebuild":
5422                                 portdb = self.roots[myroot].trees["porttree"].dbapi
5423                                 myebuild, mylocation = portdb.findname2(mykey)
5424                                 portage.writemsg("!!! This ebuild cannot be installed: " + \
5425                                         "'%s'\n" % myebuild, noiselevel=-1)
5426                         portage.writemsg("!!! Please notify the package maintainer " + \
5427                                 "that atoms must be fully-qualified.\n", noiselevel=-1)
5428                         return 0
5429                 return 1
5430
5431         def _priority(self, **kwargs):
5432                 if "remove" in self.myparams:
5433                         priority_constructor = UnmergeDepPriority
5434                 else:
5435                         priority_constructor = DepPriority
5436                 return priority_constructor(**kwargs)
5437
5438         def _dep_expand(self, root_config, atom_without_category):
5439                 """
5440                 @param root_config: a root config instance
5441                 @type root_config: RootConfig
5442                 @param atom_without_category: an atom without a category component
5443                 @type atom_without_category: String
5444                 @rtype: list
5445                 @returns: a list of atoms containing categories (possibly empty)
5446                 """
5447                 null_cp = portage.dep_getkey(insert_category_into_atom(
5448                         atom_without_category, "null"))
5449                 cat, atom_pn = portage.catsplit(null_cp)
5450
5451                 dbs = self._filtered_trees[root_config.root]["dbs"]
5452                 categories = set()
5453                 for db, pkg_type, built, installed, db_keys in dbs:
5454                         for cat in db.categories:
5455                                 if db.cp_list("%s/%s" % (cat, atom_pn)):
5456                                         categories.add(cat)
5457
5458                 deps = []
5459                 for cat in categories:
5460                         deps.append(insert_category_into_atom(
5461                                 atom_without_category, cat))
5462                 return deps
5463
5464         def _have_new_virt(self, root, atom_cp):
5465                 ret = False
5466                 for db, pkg_type, built, installed, db_keys in \
5467                         self._filtered_trees[root]["dbs"]:
5468                         if db.cp_list(atom_cp):
5469                                 ret = True
5470                                 break
5471                 return ret
5472
5473         def _iter_atoms_for_pkg(self, pkg):
5474                 # TODO: add multiple $ROOT support
5475                 if pkg.root != self.target_root:
5476                         return
5477                 atom_arg_map = self._atom_arg_map
5478                 root_config = self.roots[pkg.root]
5479                 for atom in self._set_atoms.iterAtomsForPackage(pkg):
5480                         atom_cp = portage.dep_getkey(atom)
5481                         if atom_cp != pkg.cp and \
5482                                 self._have_new_virt(pkg.root, atom_cp):
5483                                 continue
5484                         visible_pkgs = root_config.visible_pkgs.match_pkgs(atom)
5485                         visible_pkgs.reverse() # descending order
5486                         higher_slot = None
5487                         for visible_pkg in visible_pkgs:
5488                                 if visible_pkg.cp != atom_cp:
5489                                         continue
5490                                 if pkg >= visible_pkg:
5491                                         # This is descending order, and we're not
5492                                         # interested in any versions <= pkg given.
5493                                         break
5494                                 if pkg.slot_atom != visible_pkg.slot_atom:
5495                                         higher_slot = visible_pkg
5496                                         break
5497                         if higher_slot is not None:
5498                                 continue
5499                         for arg in atom_arg_map[(atom, pkg.root)]:
5500                                 if isinstance(arg, PackageArg) and \
5501                                         arg.package != pkg:
5502                                         continue
5503                                 yield arg, atom
5504
5505         def select_files(self, myfiles):
5506                 """Given a list of .tbz2s, .ebuilds sets, and deps, create the
5507                 appropriate depgraph and return a favorite list."""
5508                 debug = "--debug" in self.myopts
5509                 root_config = self.roots[self.target_root]
5510                 sets = root_config.sets
5511                 getSetAtoms = root_config.setconfig.getSetAtoms
5512                 myfavorites=[]
5513                 myroot = self.target_root
5514                 dbs = self._filtered_trees[myroot]["dbs"]
5515                 vardb = self.trees[myroot]["vartree"].dbapi
5516                 real_vardb = self._trees_orig[myroot]["vartree"].dbapi
5517                 portdb = self.trees[myroot]["porttree"].dbapi
5518                 bindb = self.trees[myroot]["bintree"].dbapi
5519                 pkgsettings = self.pkgsettings[myroot]
5520                 args = []
5521                 onlydeps = "--onlydeps" in self.myopts
5522                 lookup_owners = []
5523                 for x in myfiles:
5524                         ext = os.path.splitext(x)[1]
5525                         if ext==".tbz2":
5526                                 if not os.path.exists(x):
5527                                         if os.path.exists(
5528                                                 os.path.join(pkgsettings["PKGDIR"], "All", x)):
5529                                                 x = os.path.join(pkgsettings["PKGDIR"], "All", x)
5530                                         elif os.path.exists(
5531                                                 os.path.join(pkgsettings["PKGDIR"], x)):
5532                                                 x = os.path.join(pkgsettings["PKGDIR"], x)
5533                                         else:
5534                                                 print "\n\n!!! Binary package '"+str(x)+"' does not exist."
5535                                                 print "!!! Please ensure the tbz2 exists as specified.\n"
5536                                                 return 0, myfavorites
5537                                 mytbz2=portage.xpak.tbz2(x)
5538                                 mykey=mytbz2.getelements("CATEGORY")[0]+"/"+os.path.splitext(os.path.basename(x))[0]
5539                                 if os.path.realpath(x) != \
5540                                         os.path.realpath(self.trees[myroot]["bintree"].getname(mykey)):
5541                                         print colorize("BAD", "\n*** You need to adjust PKGDIR to emerge this package.\n")
5542                                         return 0, myfavorites
5543                                 db_keys = list(bindb._aux_cache_keys)
5544                                 metadata = izip(db_keys, bindb.aux_get(mykey, db_keys))
5545                                 pkg = Package(type_name="binary", root_config=root_config,
5546                                         cpv=mykey, built=True, metadata=metadata,
5547                                         onlydeps=onlydeps)
5548                                 self._pkg_cache[pkg] = pkg
5549                                 args.append(PackageArg(arg=x, package=pkg,
5550                                         root_config=root_config))
5551                         elif ext==".ebuild":
5552                                 ebuild_path = portage.util.normalize_path(os.path.abspath(x))
5553                                 pkgdir = os.path.dirname(ebuild_path)
5554                                 tree_root = os.path.dirname(os.path.dirname(pkgdir))
5555                                 cp = pkgdir[len(tree_root)+1:]
5556                                 e = portage.exception.PackageNotFound(
5557                                         ("%s is not in a valid portage tree " + \
5558                                         "hierarchy or does not exist") % x)
5559                                 if not portage.isvalidatom(cp):
5560                                         raise e
5561                                 cat = portage.catsplit(cp)[0]
5562                                 mykey = cat + "/" + os.path.basename(ebuild_path[:-7])
5563                                 if not portage.isvalidatom("="+mykey):
5564                                         raise e
5565                                 ebuild_path = portdb.findname(mykey)
5566                                 if ebuild_path:
5567                                         if ebuild_path != os.path.join(os.path.realpath(tree_root),
5568                                                 cp, os.path.basename(ebuild_path)):
5569                                                 print colorize("BAD", "\n*** You need to adjust PORTDIR or PORTDIR_OVERLAY to emerge this package.\n")
5570                                                 return 0, myfavorites
5571                                         if mykey not in portdb.xmatch(
5572                                                 "match-visible", portage.dep_getkey(mykey)):
5573                                                 print colorize("BAD", "\n*** You are emerging a masked package. It is MUCH better to use")
5574                                                 print colorize("BAD", "*** /etc/portage/package.* to accomplish this. See portage(5) man")
5575                                                 print colorize("BAD", "*** page for details.")
5576                                                 countdown(int(self.settings["EMERGE_WARNING_DELAY"]),
5577                                                         "Continuing...")
5578                                 else:
5579                                         raise portage.exception.PackageNotFound(
5580                                                 "%s is not in a valid portage tree hierarchy or does not exist" % x)
5581                                 db_keys = list(portdb._aux_cache_keys)
5582                                 metadata = izip(db_keys, portdb.aux_get(mykey, db_keys))
5583                                 pkg = Package(type_name="ebuild", root_config=root_config,
5584                                         cpv=mykey, metadata=metadata, onlydeps=onlydeps)
5585                                 pkgsettings.setcpv(pkg)
5586                                 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
5587                                 pkg.metadata['CHOST'] = pkgsettings.get('CHOST', '')
5588                                 self._pkg_cache[pkg] = pkg
5589                                 args.append(PackageArg(arg=x, package=pkg,
5590                                         root_config=root_config))
5591                         elif x.startswith(os.path.sep):
5592                                 if not x.startswith(myroot):
5593                                         portage.writemsg(("\n\n!!! '%s' does not start with" + \
5594                                                 " $ROOT.\n") % x, noiselevel=-1)
5595                                         return 0, []
5596                                 # Queue these up since it's most efficient to handle
5597                                 # multiple files in a single iter_owners() call.
5598                                 lookup_owners.append(x)
5599                         else:
5600                                 if x in ("system", "world"):
5601                                         x = SETPREFIX + x
5602                                 if x.startswith(SETPREFIX):
5603                                         s = x[len(SETPREFIX):]
5604                                         if s not in sets:
5605                                                 raise portage.exception.PackageSetNotFound(s)
5606                                         if s in self._sets:
5607                                                 continue
5608                                         # Recursively expand sets so that containment tests in
5609                                         # self._get_parent_sets() properly match atoms in nested
5610                                         # sets (like if world contains system).
5611                                         expanded_set = InternalPackageSet(
5612                                                 initial_atoms=getSetAtoms(s))
5613                                         self._sets[s] = expanded_set
5614                                         args.append(SetArg(arg=x, set=expanded_set,
5615                                                 root_config=root_config))
5616                                         continue
5617                                 if not is_valid_package_atom(x):
5618                                         portage.writemsg("\n\n!!! '%s' is not a valid package atom.\n" % x,
5619                                                 noiselevel=-1)
5620                                         portage.writemsg("!!! Please check ebuild(5) for full details.\n")
5621                                         portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
5622                                         return (0,[])
5623                                 # Don't expand categories or old-style virtuals here unless
5624                                 # necessary. Expansion of old-style virtuals here causes at
5625                                 # least the following problems:
5626                                 #   1) It's more difficult to determine which set(s) an atom
5627                                 #      came from, if any.
5628                                 #   2) It takes away freedom from the resolver to choose other
5629                                 #      possible expansions when necessary.
5630                                 if "/" in x:
5631                                         args.append(AtomArg(arg=x, atom=x,
5632                                                 root_config=root_config))
5633                                         continue
5634                                 expanded_atoms = self._dep_expand(root_config, x)
5635                                 installed_cp_set = set()
5636                                 for atom in expanded_atoms:
5637                                         atom_cp = portage.dep_getkey(atom)
5638                                         if vardb.cp_list(atom_cp):
5639                                                 installed_cp_set.add(atom_cp)
5640
5641                                 if len(installed_cp_set) > 1:
5642                                         non_virtual_cps = set()
5643                                         for atom_cp in installed_cp_set:
5644                                                 if not atom_cp.startswith("virtual/"):
5645                                                         non_virtual_cps.add(atom_cp)
5646                                         if len(non_virtual_cps) == 1:
5647                                                 installed_cp_set = non_virtual_cps
5648
5649                                 if len(expanded_atoms) > 1 and len(installed_cp_set) == 1:
5650                                         installed_cp = iter(installed_cp_set).next()
5651                                         expanded_atoms = [atom for atom in expanded_atoms \
5652                                                 if portage.dep_getkey(atom) == installed_cp]
5653
5654                                 if len(expanded_atoms) > 1:
5655                                         print
5656                                         print
5657                                         ambiguous_package_name(x, expanded_atoms, root_config,
5658                                                 self.spinner, self.myopts)
5659                                         return False, myfavorites
5660                                 if expanded_atoms:
5661                                         atom = expanded_atoms[0]
5662                                 else:
5663                                         null_atom = insert_category_into_atom(x, "null")
5664                                         null_cp = portage.dep_getkey(null_atom)
5665                                         cat, atom_pn = portage.catsplit(null_cp)
5666                                         virts_p = root_config.settings.get_virts_p().get(atom_pn)
5667                                         if virts_p:
5668                                                 # Allow the depgraph to choose which virtual.
5669                                                 atom = insert_category_into_atom(x, "virtual")
5670                                         else:
5671                                                 atom = insert_category_into_atom(x, "null")
5672
5673                                 args.append(AtomArg(arg=x, atom=atom,
5674                                         root_config=root_config))
5675
5676                 if lookup_owners:
5677                         relative_paths = []
5678                         search_for_multiple = False
5679                         if len(lookup_owners) > 1:
5680                                 search_for_multiple = True
5681
5682                         for x in lookup_owners:
5683                                 if not search_for_multiple and os.path.isdir(x):
5684                                         search_for_multiple = True
5685                                 relative_paths.append(x[len(myroot):])
5686
5687                         owners = set()
5688                         for pkg, relative_path in \
5689                                 real_vardb._owners.iter_owners(relative_paths):
5690                                 owners.add(pkg.mycpv)
5691                                 if not search_for_multiple:
5692                                         break
5693
5694                         if not owners:
5695                                 portage.writemsg(("\n\n!!! '%s' is not claimed " + \
5696                                         "by any package.\n") % lookup_owners[0], noiselevel=-1)
5697                                 return 0, []
5698
5699                         for cpv in owners:
5700                                 slot = vardb.aux_get(cpv, ["SLOT"])[0]
5701                                 if not slot:
5702                                         # portage now masks packages with missing slot, but it's
5703                                         # possible that one was installed by an older version
5704                                         atom = portage.cpv_getkey(cpv)
5705                                 else:
5706                                         atom = "%s:%s" % (portage.cpv_getkey(cpv), slot)
5707                                 args.append(AtomArg(arg=atom, atom=atom,
5708                                         root_config=root_config))
5709
5710                 if "--update" in self.myopts:
5711                         # In some cases, the greedy slots behavior can pull in a slot that
5712                         # the user would want to uninstall due to it being blocked by a
5713                         # newer version in a different slot. Therefore, it's necessary to
5714                         # detect and discard any that should be uninstalled. Each time
5715                         # that arguments are updated, package selections are repeated in
5716                         # order to ensure consistency with the current arguments:
5717                         #
5718                         #  1) Initialize args
5719                         #  2) Select packages and generate initial greedy atoms
5720                         #  3) Update args with greedy atoms
5721                         #  4) Select packages and generate greedy atoms again, while
5722                         #     accounting for any blockers between selected packages
5723                         #  5) Update args with revised greedy atoms
5724
5725                         self._set_args(args)
5726                         greedy_args = []
5727                         for arg in args:
5728                                 greedy_args.append(arg)
5729                                 if not isinstance(arg, AtomArg):
5730                                         continue
5731                                 for atom in self._greedy_slots(arg.root_config, arg.atom):
5732                                         greedy_args.append(
5733                                                 AtomArg(arg=arg.arg, atom=atom,
5734                                                         root_config=arg.root_config))
5735
5736                         self._set_args(greedy_args)
5737                         del greedy_args
5738
5739                         # Revise greedy atoms, accounting for any blockers
5740                         # between selected packages.
5741                         revised_greedy_args = []
5742                         for arg in args:
5743                                 revised_greedy_args.append(arg)
5744                                 if not isinstance(arg, AtomArg):
5745                                         continue
5746                                 for atom in self._greedy_slots(arg.root_config, arg.atom,
5747                                         blocker_lookahead=True):
5748                                         revised_greedy_args.append(
5749                                                 AtomArg(arg=arg.arg, atom=atom,
5750                                                         root_config=arg.root_config))
5751                         args = revised_greedy_args
5752                         del revised_greedy_args
5753
5754                 self._set_args(args)
5755
5756                 myfavorites = set(myfavorites)
5757                 for arg in args:
5758                         if isinstance(arg, (AtomArg, PackageArg)):
5759                                 myfavorites.add(arg.atom)
5760                         elif isinstance(arg, SetArg):
5761                                 myfavorites.add(arg.arg)
5762                 myfavorites = list(myfavorites)
5763
5764                 pprovideddict = pkgsettings.pprovideddict
5765                 if debug:
5766                         portage.writemsg("\n", noiselevel=-1)
5767                 # Order needs to be preserved since a feature of --nodeps
5768                 # is to allow the user to force a specific merge order.
5769                 args.reverse()
5770                 while args:
5771                         arg = args.pop()
5772                         for atom in arg.set:
5773                                 self.spinner.update()
5774                                 dep = Dependency(atom=atom, onlydeps=onlydeps,
5775                                         root=myroot, parent=arg)
5776                                 atom_cp = portage.dep_getkey(atom)
5777                                 try:
5778                                         pprovided = pprovideddict.get(portage.dep_getkey(atom))
5779                                         if pprovided and portage.match_from_list(atom, pprovided):
5780                                                 # A provided package has been specified on the command line.
5781                                                 self._pprovided_args.append((arg, atom))
5782                                                 continue
5783                                         if isinstance(arg, PackageArg):
5784                                                 if not self._add_pkg(arg.package, dep) or \
5785                                                         not self._create_graph():
5786                                                         sys.stderr.write(("\n\n!!! Problem resolving " + \
5787                                                                 "dependencies for %s\n") % arg.arg)
5788                                                         return 0, myfavorites
5789                                                 continue
5790                                         if debug:
5791                                                 portage.writemsg("      Arg: %s\n     Atom: %s\n" % \
5792                                                         (arg, atom), noiselevel=-1)
5793                                         pkg, existing_node = self._select_package(
5794                                                 myroot, atom, onlydeps=onlydeps)
5795                                         if not pkg:
5796                                                 if not (isinstance(arg, SetArg) and \
5797                                                         arg.name in ("system", "world")):
5798                                                         self._unsatisfied_deps_for_display.append(
5799                                                                 ((myroot, atom), {}))
5800                                                         return 0, myfavorites
5801                                                 self._missing_args.append((arg, atom))
5802                                                 continue
5803                                         if atom_cp != pkg.cp:
5804                                                 # For old-style virtuals, we need to repeat the
5805                                                 # package.provided check against the selected package.
5806                                                 expanded_atom = atom.replace(atom_cp, pkg.cp)
5807                                                 pprovided = pprovideddict.get(pkg.cp)
5808                                                 if pprovided and \
5809                                                         portage.match_from_list(expanded_atom, pprovided):
5810                                                         # A provided package has been
5811                                                         # specified on the command line.
5812                                                         self._pprovided_args.append((arg, atom))
5813                                                         continue
5814                                         if pkg.installed and "selective" not in self.myparams:
5815                                                 self._unsatisfied_deps_for_display.append(
5816                                                         ((myroot, atom), {}))
5817                                                 # Previous behavior was to bail out in this case, but
5818                                                 # since the dep is satisfied by the installed package,
5819                                                 # it's more friendly to continue building the graph
5820                                                 # and just show a warning message. Therefore, only bail
5821                                                 # out here if the atom is not from either the system or
5822                                                 # world set.
5823                                                 if not (isinstance(arg, SetArg) and \
5824                                                         arg.name in ("system", "world")):
5825                                                         return 0, myfavorites
5826
5827                                         # Add the selected package to the graph as soon as possible
5828                                         # so that later dep_check() calls can use it as feedback
5829                                         # for making more consistent atom selections.
5830                                         if not self._add_pkg(pkg, dep):
5831                                                 if isinstance(arg, SetArg):
5832                                                         sys.stderr.write(("\n\n!!! Problem resolving " + \
5833                                                                 "dependencies for %s from %s\n") % \
5834                                                                 (atom, arg.arg))
5835                                                 else:
5836                                                         sys.stderr.write(("\n\n!!! Problem resolving " + \
5837                                                                 "dependencies for %s\n") % atom)
5838                                                 return 0, myfavorites
5839
5840                                 except portage.exception.MissingSignature, e:
5841                                         portage.writemsg("\n\n!!! A missing gpg signature is preventing portage from calculating the\n")
5842                                         portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
5843                                         portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
5844                                         portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
5845                                         portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
5846                                         return 0, myfavorites
5847                                 except portage.exception.InvalidSignature, e:
5848                                         portage.writemsg("\n\n!!! An invalid gpg signature is preventing portage from calculating the\n")
5849                                         portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
5850                                         portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
5851                                         portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
5852                                         portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
5853                                         return 0, myfavorites
5854                                 except SystemExit, e:
5855                                         raise # Needed else can't exit
5856                                 except Exception, e:
5857                                         print >> sys.stderr, "\n\n!!! Problem in '%s' dependencies." % atom
5858                                         print >> sys.stderr, "!!!", str(e), getattr(e, "__module__", None)
5859                                         raise
5860
5861                 # Now that the root packages have been added to the graph,
5862                 # process the dependencies.
5863                 if not self._create_graph():
5864                         return 0, myfavorites
5865
5866                 missing=0
5867                 if "--usepkgonly" in self.myopts:
5868                         for xs in self.digraph.all_nodes():
5869                                 if not isinstance(xs, Package):
5870                                         continue
5871                                 if len(xs) >= 4 and xs[0] != "binary" and xs[3] == "merge":
5872                                         if missing == 0:
5873                                                 print
5874                                         missing += 1
5875                                         print "Missing binary for:",xs[2]
5876
5877                 try:
5878                         self.altlist()
5879                 except self._unknown_internal_error:
5880                         return False, myfavorites
5881
5882                 # We're true here unless we are missing binaries.
5883                 return (not missing,myfavorites)
5884
5885         def _set_args(self, args):
5886                 """
5887                 Create the "args" package set from atoms and packages given as
5888                 arguments. This method can be called multiple times if necessary.
5889                 The package selection cache is automatically invalidated, since
5890                 arguments influence package selections.
5891                 """
5892                 args_set = self._sets["args"]
5893                 args_set.clear()
5894                 for arg in args:
5895                         if not isinstance(arg, (AtomArg, PackageArg)):
5896                                 continue
5897                         atom = arg.atom
5898                         if atom in args_set:
5899                                 continue
5900                         args_set.add(atom)
5901
5902                 self._set_atoms.clear()
5903                 self._set_atoms.update(chain(*self._sets.itervalues()))
5904                 atom_arg_map = self._atom_arg_map
5905                 atom_arg_map.clear()
5906                 for arg in args:
5907                         for atom in arg.set:
5908                                 atom_key = (atom, arg.root_config.root)
5909                                 refs = atom_arg_map.get(atom_key)
5910                                 if refs is None:
5911                                         refs = []
5912                                         atom_arg_map[atom_key] = refs
5913                                         if arg not in refs:
5914                                                 refs.append(arg)
5915
5916                 # Invalidate the package selection cache, since
5917                 # arguments influence package selections.
5918                 self._highest_pkg_cache.clear()
5919                 for trees in self._filtered_trees.itervalues():
5920                         trees["porttree"].dbapi._clear_cache()
5921
5922         def _greedy_slots(self, root_config, atom, blocker_lookahead=False):
5923                 """
5924                 Return a list of slot atoms corresponding to installed slots that
5925                 differ from the slot of the highest visible match. When
5926                 blocker_lookahead is True, slot atoms that would trigger a blocker
5927                 conflict are automatically discarded, potentially allowing automatic
5928                 uninstallation of older slots when appropriate.
5929                 """
5930                 highest_pkg, in_graph = self._select_package(root_config.root, atom)
5931                 if highest_pkg is None:
5932                         return []
5933                 vardb = root_config.trees["vartree"].dbapi
5934                 slots = set()
5935                 for cpv in vardb.match(atom):
5936                         # don't mix new virtuals with old virtuals
5937                         if portage.cpv_getkey(cpv) == highest_pkg.cp:
5938                                 slots.add(vardb.aux_get(cpv, ["SLOT"])[0])
5939
5940                 slots.add(highest_pkg.metadata["SLOT"])
5941                 if len(slots) == 1:
5942                         return []
5943                 greedy_pkgs = []
5944                 slots.remove(highest_pkg.metadata["SLOT"])
5945                 while slots:
5946                         slot = slots.pop()
5947                         slot_atom = portage.dep.Atom("%s:%s" % (highest_pkg.cp, slot))
5948                         pkg, in_graph = self._select_package(root_config.root, slot_atom)
5949                         if pkg is not None and \
5950                                 pkg.cp == highest_pkg.cp and pkg < highest_pkg:
5951                                 greedy_pkgs.append(pkg)
5952                 if not greedy_pkgs:
5953                         return []
5954                 if not blocker_lookahead:
5955                         return [pkg.slot_atom for pkg in greedy_pkgs]
5956
5957                 blockers = {}
5958                 blocker_dep_keys = ["DEPEND", "PDEPEND", "RDEPEND"]
5959                 for pkg in greedy_pkgs + [highest_pkg]:
5960                         dep_str = " ".join(pkg.metadata[k] for k in blocker_dep_keys)
5961                         try:
5962                                 atoms = self._select_atoms(
5963                                         pkg.root, dep_str, pkg.use.enabled,
5964                                         parent=pkg, strict=True)
5965                         except portage.exception.InvalidDependString:
5966                                 continue
5967                         blocker_atoms = (x for x in atoms if x.blocker)
5968                         blockers[pkg] = InternalPackageSet(initial_atoms=blocker_atoms)
5969
5970                 if highest_pkg not in blockers:
5971                         return []
5972
5973                 # filter packages with invalid deps
5974                 greedy_pkgs = [pkg for pkg in greedy_pkgs if pkg in blockers]
5975
5976                 # filter packages that conflict with highest_pkg
5977                 greedy_pkgs = [pkg for pkg in greedy_pkgs if not \
5978                         (blockers[highest_pkg].findAtomForPackage(pkg) or \
5979                         blockers[pkg].findAtomForPackage(highest_pkg))]
5980
5981                 if not greedy_pkgs:
5982                         return []
5983
5984                 # If two packages conflict, discard the lower version.
5985                 discard_pkgs = set()
5986                 greedy_pkgs.sort(reverse=True)
5987                 for i in xrange(len(greedy_pkgs) - 1):
5988                         pkg1 = greedy_pkgs[i]
5989                         if pkg1 in discard_pkgs:
5990                                 continue
5991                         for j in xrange(i + 1, len(greedy_pkgs)):
5992                                 pkg2 = greedy_pkgs[j]
5993                                 if pkg2 in discard_pkgs:
5994                                         continue
5995                                 if blockers[pkg1].findAtomForPackage(pkg2) or \
5996                                         blockers[pkg2].findAtomForPackage(pkg1):
5997                                         # pkg1 > pkg2
5998                                         discard_pkgs.add(pkg2)
5999
6000                 return [pkg.slot_atom for pkg in greedy_pkgs \
6001                         if pkg not in discard_pkgs]
6002
6003         def _select_atoms_from_graph(self, *pargs, **kwargs):
6004                 """
6005                 Prefer atoms matching packages that have already been
6006                 added to the graph or those that are installed and have
6007                 not been scheduled for replacement.
6008                 """
6009                 kwargs["trees"] = self._graph_trees
6010                 return self._select_atoms_highest_available(*pargs, **kwargs)
6011
6012         def _select_atoms_highest_available(self, root, depstring,
6013                 myuse=None, parent=None, strict=True, trees=None, priority=None):
6014                 """This will raise InvalidDependString if necessary. If trees is
6015                 None then self._filtered_trees is used."""
6016                 pkgsettings = self.pkgsettings[root]
6017                 if trees is None:
6018                         trees = self._filtered_trees
6019                 if not getattr(priority, "buildtime", False):
6020                         # The parent should only be passed to dep_check() for buildtime
6021                         # dependencies since that's the only case when it's appropriate
6022                         # to trigger the circular dependency avoidance code which uses it.
6023                         # It's important not to trigger the same circular dependency
6024                         # avoidance code for runtime dependencies since it's not needed
6025                         # and it can promote an incorrect package choice.
6026                         parent = None
6027                 if True:
6028                         try:
6029                                 if parent is not None:
6030                                         trees[root]["parent"] = parent
6031                                 if not strict:
6032                                         portage.dep._dep_check_strict = False
6033                                 mycheck = portage.dep_check(depstring, None,
6034                                         pkgsettings, myuse=myuse,
6035                                         myroot=root, trees=trees)
6036                         finally:
6037                                 if parent is not None:
6038                                         trees[root].pop("parent")
6039                                 portage.dep._dep_check_strict = True
6040                         if not mycheck[0]:
6041                                 raise portage.exception.InvalidDependString(mycheck[1])
6042                         selected_atoms = mycheck[1]
6043                 return selected_atoms
6044
6045         def _show_unsatisfied_dep(self, root, atom, myparent=None, arg=None):
6046                 atom = portage.dep.Atom(atom)
6047                 atom_set = InternalPackageSet(initial_atoms=(atom,))
6048                 atom_without_use = atom
6049                 if atom.use:
6050                         atom_without_use = portage.dep.remove_slot(atom)
6051                         if atom.slot:
6052                                 atom_without_use += ":" + atom.slot
6053                         atom_without_use = portage.dep.Atom(atom_without_use)
6054                 xinfo = '"%s"' % atom
6055                 if arg:
6056                         xinfo='"%s"' % arg
6057                 # Discard null/ from failed cpv_expand category expansion.
6058                 xinfo = xinfo.replace("null/", "")
6059                 masked_packages = []
6060                 missing_use = []
6061                 masked_pkg_instances = set()
6062                 missing_licenses = []
6063                 have_eapi_mask = False
6064                 pkgsettings = self.pkgsettings[root]
6065                 implicit_iuse = pkgsettings._get_implicit_iuse()
6066                 root_config = self.roots[root]
6067                 portdb = self.roots[root].trees["porttree"].dbapi
6068                 dbs = self._filtered_trees[root]["dbs"]
6069                 for db, pkg_type, built, installed, db_keys in dbs:
6070                         if installed:
6071                                 continue
6072                         match = db.match
6073                         if hasattr(db, "xmatch"):
6074                                 cpv_list = db.xmatch("match-all", atom_without_use)
6075                         else:
6076                                 cpv_list = db.match(atom_without_use)
6077                         # descending order
6078                         cpv_list.reverse()
6079                         for cpv in cpv_list:
6080                                 metadata, mreasons  = get_mask_info(root_config, cpv,
6081                                         pkgsettings, db, pkg_type, built, installed, db_keys)
6082                                 if metadata is not None:
6083                                         pkg = Package(built=built, cpv=cpv,
6084                                                 installed=installed, metadata=metadata,
6085                                                 root_config=root_config)
6086                                         if pkg.cp != atom.cp:
6087                                                 # A cpv can be returned from dbapi.match() as an
6088                                                 # old-style virtual match even in cases when the
6089                                                 # package does not actually PROVIDE the virtual.
6090                                                 # Filter out any such false matches here.
6091                                                 if not atom_set.findAtomForPackage(pkg):
6092                                                         continue
6093                                         if mreasons:
6094                                                 masked_pkg_instances.add(pkg)
6095                                         if atom.use:
6096                                                 missing_use.append(pkg)
6097                                                 if not mreasons:
6098                                                         continue
6099                                 masked_packages.append(
6100                                         (root_config, pkgsettings, cpv, metadata, mreasons))
6101
6102                 missing_use_reasons = []
6103                 missing_iuse_reasons = []
6104                 for pkg in missing_use:
6105                         use = pkg.use.enabled
6106                         iuse = implicit_iuse.union(re.escape(x) for x in pkg.iuse.all)
6107                         iuse_re = re.compile("^(%s)$" % "|".join(iuse))
6108                         missing_iuse = []
6109                         for x in atom.use.required:
6110                                 if iuse_re.match(x) is None:
6111                                         missing_iuse.append(x)
6112                         mreasons = []
6113                         if missing_iuse:
6114                                 mreasons.append("Missing IUSE: %s" % " ".join(missing_iuse))
6115                                 missing_iuse_reasons.append((pkg, mreasons))
6116                         else:
6117                                 need_enable = sorted(atom.use.enabled.difference(use))
6118                                 need_disable = sorted(atom.use.disabled.intersection(use))
6119                                 if need_enable or need_disable:
6120                                         changes = []
6121                                         changes.extend(colorize("red", "+" + x) \
6122                                                 for x in need_enable)
6123                                         changes.extend(colorize("blue", "-" + x) \
6124                                                 for x in need_disable)
6125                                         mreasons.append("Change USE: %s" % " ".join(changes))
6126                                         missing_use_reasons.append((pkg, mreasons))
6127
6128                 unmasked_use_reasons = [(pkg, mreasons) for (pkg, mreasons) \
6129                         in missing_use_reasons if pkg not in masked_pkg_instances]
6130
6131                 unmasked_iuse_reasons = [(pkg, mreasons) for (pkg, mreasons) \
6132                         in missing_iuse_reasons if pkg not in masked_pkg_instances]
6133
6134                 show_missing_use = False
6135                 if unmasked_use_reasons:
6136                         # Only show the latest version.
6137                         show_missing_use = unmasked_use_reasons[:1]
6138                 elif unmasked_iuse_reasons:
6139                         if missing_use_reasons:
6140                                 # All packages with required IUSE are masked,
6141                                 # so display a normal masking message.
6142                                 pass
6143                         else:
6144                                 show_missing_use = unmasked_iuse_reasons
6145
6146                 if show_missing_use:
6147                         print "\nemerge: there are no ebuilds built with USE flags to satisfy "+green(xinfo)+"."
6148                         print "!!! One of the following packages is required to complete your request:"
6149                         for pkg, mreasons in show_missing_use:
6150                                 print "- "+pkg.cpv+" ("+", ".join(mreasons)+")"
6151
6152                 elif masked_packages:
6153                         print "\n!!! " + \
6154                                 colorize("BAD", "All ebuilds that could satisfy ") + \
6155                                 colorize("INFORM", xinfo) + \
6156                                 colorize("BAD", " have been masked.")
6157                         print "!!! One of the following masked packages is required to complete your request:"
6158                         have_eapi_mask = show_masked_packages(masked_packages)
6159                         if have_eapi_mask:
6160                                 print
6161                                 msg = ("The current version of portage supports " + \
6162                                         "EAPI '%s'. You must upgrade to a newer version" + \
6163                                         " of portage before EAPI masked packages can" + \
6164                                         " be installed.") % portage.const.EAPI
6165                                 from textwrap import wrap
6166                                 for line in wrap(msg, 75):
6167                                         print line
6168                         print
6169                         show_mask_docs()
6170                 else:
6171                         print "\nemerge: there are no ebuilds to satisfy "+green(xinfo)+"."
6172
6173                 # Show parent nodes and the argument that pulled them in.
6174                 traversed_nodes = set()
6175                 node = myparent
6176                 msg = []
6177                 while node is not None:
6178                         traversed_nodes.add(node)
6179                         msg.append('(dependency required by "%s" [%s])' % \
6180                                 (colorize('INFORM', str(node.cpv)), node.type_name))
6181                         # When traversing to parents, prefer arguments over packages
6182                         # since arguments are root nodes. Never traverse the same
6183                         # package twice, in order to prevent an infinite loop.
6184                         selected_parent = None
6185                         for parent in self.digraph.parent_nodes(node):
6186                                 if isinstance(parent, DependencyArg):
6187                                         msg.append('(dependency required by "%s" [argument])' % \
6188                                                 (colorize('INFORM', str(parent))))
6189                                         selected_parent = None
6190                                         break
6191                                 if parent not in traversed_nodes:
6192                                         selected_parent = parent
6193                         node = selected_parent
6194                 for line in msg:
6195                         print line
6196
6197                 print
6198
6199         def _select_pkg_highest_available(self, root, atom, onlydeps=False):
6200                 cache_key = (root, atom, onlydeps)
6201                 ret = self._highest_pkg_cache.get(cache_key)
6202                 if ret is not None:
6203                         pkg, existing = ret
6204                         if pkg and not existing:
6205                                 existing = self._slot_pkg_map[root].get(pkg.slot_atom)
6206                                 if existing and existing == pkg:
6207                                         # Update the cache to reflect that the
6208                                         # package has been added to the graph.
6209                                         ret = pkg, pkg
6210                                         self._highest_pkg_cache[cache_key] = ret
6211                         return ret
6212                 ret = self._select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps)
6213                 self._highest_pkg_cache[cache_key] = ret
6214                 pkg, existing = ret
6215                 if pkg is not None:
6216                         settings = pkg.root_config.settings
6217                         if visible(settings, pkg) and not (pkg.installed and \
6218                                 settings._getMissingKeywords(pkg.cpv, pkg.metadata)):
6219                                 pkg.root_config.visible_pkgs.cpv_inject(pkg)
6220                 return ret
6221
6222         def _select_pkg_highest_available_imp(self, root, atom, onlydeps=False):
6223                 root_config = self.roots[root]
6224                 pkgsettings = self.pkgsettings[root]
6225                 dbs = self._filtered_trees[root]["dbs"]
6226                 vardb = self.roots[root].trees["vartree"].dbapi
6227                 portdb = self.roots[root].trees["porttree"].dbapi
6228                 # List of acceptable packages, ordered by type preference.
6229                 matched_packages = []
6230                 highest_version = None
6231                 if not isinstance(atom, portage.dep.Atom):
6232                         atom = portage.dep.Atom(atom)
6233                 atom_cp = atom.cp
6234                 atom_set = InternalPackageSet(initial_atoms=(atom,))
6235                 existing_node = None
6236                 myeb = None
6237                 usepkgonly = "--usepkgonly" in self.myopts
6238                 empty = "empty" in self.myparams
6239                 selective = "selective" in self.myparams
6240                 reinstall = False
6241                 noreplace = "--noreplace" in self.myopts
6242                 # Behavior of the "selective" parameter depends on
6243                 # whether or not a package matches an argument atom.
6244                 # If an installed package provides an old-style
6245                 # virtual that is no longer provided by an available
6246                 # package, the installed package may match an argument
6247                 # atom even though none of the available packages do.
6248                 # Therefore, "selective" logic does not consider
6249                 # whether or not an installed package matches an
6250                 # argument atom. It only considers whether or not
6251                 # available packages match argument atoms, which is
6252                 # represented by the found_available_arg flag.
6253                 found_available_arg = False
6254                 for find_existing_node in True, False:
6255                         if existing_node:
6256                                 break
6257                         for db, pkg_type, built, installed, db_keys in dbs:
6258                                 if existing_node:
6259                                         break
6260                                 if installed and not find_existing_node:
6261                                         want_reinstall = reinstall or empty or \
6262                                                 (found_available_arg and not selective)
6263                                         if want_reinstall and matched_packages:
6264                                                 continue
6265                                 if hasattr(db, "xmatch"):
6266                                         cpv_list = db.xmatch("match-all", atom)
6267                                 else:
6268                                         cpv_list = db.match(atom)
6269
6270                                 # USE=multislot can make an installed package appear as if
6271                                 # it doesn't satisfy a slot dependency. Rebuilding the ebuild
6272                                 # won't do any good as long as USE=multislot is enabled since
6273                                 # the newly built package still won't have the expected slot.
6274                                 # Therefore, assume that such SLOT dependencies are already
6275                                 # satisfied rather than forcing a rebuild.
6276                                 if installed and not cpv_list and atom.slot:
6277                                         for cpv in db.match(atom.cp):
6278                                                 slot_available = False
6279                                                 for other_db, other_type, other_built, \
6280                                                         other_installed, other_keys in dbs:
6281                                                         try:
6282                                                                 if atom.slot == \
6283                                                                         other_db.aux_get(cpv, ["SLOT"])[0]:
6284                                                                         slot_available = True
6285                                                                         break
6286                                                         except KeyError:
6287                                                                 pass
6288                                                 if not slot_available:
6289                                                         continue
6290                                                 inst_pkg = self._pkg(cpv, "installed",
6291                                                         root_config, installed=installed)
6292                                                 # Remove the slot from the atom and verify that
6293                                                 # the package matches the resulting atom.
6294                                                 atom_without_slot = portage.dep.remove_slot(atom)
6295                                                 if atom.use:
6296                                                         atom_without_slot += str(atom.use)
6297                                                 atom_without_slot = portage.dep.Atom(atom_without_slot)
6298                                                 if portage.match_from_list(
6299                                                         atom_without_slot, [inst_pkg]):
6300                                                         cpv_list = [inst_pkg.cpv]
6301                                                 break
6302
6303                                 if not cpv_list:
6304                                         continue
6305                                 pkg_status = "merge"
6306                                 if installed or onlydeps:
6307                                         pkg_status = "nomerge"
6308                                 # descending order
6309                                 cpv_list.reverse()
6310                                 for cpv in cpv_list:
6311                                         # Make --noreplace take precedence over --newuse.
6312                                         if not installed and noreplace and \
6313                                                 cpv in vardb.match(atom):
6314                                                 # If the installed version is masked, it may
6315                                                 # be necessary to look at lower versions,
6316                                                 # in case there is a visible downgrade.
6317                                                 continue
6318                                         reinstall_for_flags = None
6319                                         cache_key = (pkg_type, root, cpv, pkg_status)
6320                                         calculated_use = True
6321                                         pkg = self._pkg_cache.get(cache_key)
6322                                         if pkg is None:
6323                                                 calculated_use = False
6324                                                 try:
6325                                                         metadata = izip(db_keys, db.aux_get(cpv, db_keys))
6326                                                 except KeyError:
6327                                                         continue
6328                                                 pkg = Package(built=built, cpv=cpv,
6329                                                         installed=installed, metadata=metadata,
6330                                                         onlydeps=onlydeps, root_config=root_config,
6331                                                         type_name=pkg_type)
6332                                                 metadata = pkg.metadata
6333                                                 if not built:
6334                                                         metadata['CHOST'] = pkgsettings.get('CHOST', '')
6335                                                 if not built and ("?" in metadata["LICENSE"] or \
6336                                                         "?" in metadata["PROVIDE"]):
6337                                                         # This is avoided whenever possible because
6338                                                         # it's expensive. It only needs to be done here
6339                                                         # if it has an effect on visibility.
6340                                                         pkgsettings.setcpv(pkg)
6341                                                         metadata["USE"] = pkgsettings["PORTAGE_USE"]
6342                                                         calculated_use = True
6343                                                 self._pkg_cache[pkg] = pkg
6344
6345                                         if not installed or (built and matched_packages):
6346                                                 # Only enforce visibility on installed packages
6347                                                 # if there is at least one other visible package
6348                                                 # available. By filtering installed masked packages
6349                                                 # here, packages that have been masked since they
6350                                                 # were installed can be automatically downgraded
6351                                                 # to an unmasked version.
6352                                                 try:
6353                                                         if not visible(pkgsettings, pkg):
6354                                                                 continue
6355                                                 except portage.exception.InvalidDependString:
6356                                                         if not installed:
6357                                                                 continue
6358
6359                                                 # Enable upgrade or downgrade to a version
6360                                                 # with visible KEYWORDS when the installed
6361                                                 # version is masked by KEYWORDS, but never
6362                                                 # reinstall the same exact version only due
6363                                                 # to a KEYWORDS mask.
6364                                                 if built and matched_packages:
6365
6366                                                         different_version = None
6367                                                         for avail_pkg in matched_packages:
6368                                                                 if not portage.dep.cpvequal(
6369                                                                         pkg.cpv, avail_pkg.cpv):
6370                                                                         different_version = avail_pkg
6371                                                                         break
6372                                                         if different_version is not None:
6373
6374                                                                 if installed and \
6375                                                                         pkgsettings._getMissingKeywords(
6376                                                                         pkg.cpv, pkg.metadata):
6377                                                                         continue
6378
6379                                                                 # If the ebuild no longer exists or it's
6380                                                                 # keywords have been dropped, reject built
6381                                                                 # instances (installed or binary).
6382                                                                 # If --usepkgonly is enabled, assume that
6383                                                                 # the ebuild status should be ignored.
6384                                                                 if not usepkgonly:
6385                                                                         try:
6386                                                                                 pkg_eb = self._pkg(
6387                                                                                         pkg.cpv, "ebuild", root_config)
6388                                                                         except portage.exception.PackageNotFound:
6389                                                                                 continue
6390                                                                         else:
6391                                                                                 if not visible(pkgsettings, pkg_eb):
6392                                                                                         continue
6393
6394                                         if not pkg.built and not calculated_use:
6395                                                 # This is avoided whenever possible because
6396                                                 # it's expensive.
6397                                                 pkgsettings.setcpv(pkg)
6398                                                 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
6399
6400                                         if pkg.cp != atom.cp:
6401                                                 # A cpv can be returned from dbapi.match() as an
6402                                                 # old-style virtual match even in cases when the
6403                                                 # package does not actually PROVIDE the virtual.
6404                                                 # Filter out any such false matches here.
6405                                                 if not atom_set.findAtomForPackage(pkg):
6406                                                         continue
6407
6408                                         myarg = None
6409                                         if root == self.target_root:
6410                                                 try:
6411                                                         # Ebuild USE must have been calculated prior
6412                                                         # to this point, in case atoms have USE deps.
6413                                                         myarg = self._iter_atoms_for_pkg(pkg).next()
6414                                                 except StopIteration:
6415                                                         pass
6416                                                 except portage.exception.InvalidDependString:
6417                                                         if not installed:
6418                                                                 # masked by corruption
6419                                                                 continue
6420                                         if not installed and myarg:
6421                                                 found_available_arg = True
6422
6423                                         if atom.use and not pkg.built:
6424                                                 use = pkg.use.enabled
6425                                                 if atom.use.enabled.difference(use):
6426                                                         continue
6427                                                 if atom.use.disabled.intersection(use):
6428                                                         continue
6429                                         if pkg.cp == atom_cp:
6430                                                 if highest_version is None:
6431                                                         highest_version = pkg
6432                                                 elif pkg > highest_version:
6433                                                         highest_version = pkg
6434                                         # At this point, we've found the highest visible
6435                                         # match from the current repo. Any lower versions
6436                                         # from this repo are ignored, so this so the loop
6437                                         # will always end with a break statement below
6438                                         # this point.
6439                                         if find_existing_node:
6440                                                 e_pkg = self._slot_pkg_map[root].get(pkg.slot_atom)
6441                                                 if not e_pkg:
6442                                                         break
6443                                                 if portage.dep.match_from_list(atom, [e_pkg]):
6444                                                         if highest_version and \
6445                                                                 e_pkg.cp == atom_cp and \
6446                                                                 e_pkg < highest_version and \
6447                                                                 e_pkg.slot_atom != highest_version.slot_atom:
6448                                                                 # There is a higher version available in a
6449                                                                 # different slot, so this existing node is
6450                                                                 # irrelevant.
6451                                                                 pass
6452                                                         else:
6453                                                                 matched_packages.append(e_pkg)
6454                                                                 existing_node = e_pkg
6455                                                 break
6456                                         # Compare built package to current config and
6457                                         # reject the built package if necessary.
6458                                         if built and not installed and \
6459                                                 ("--newuse" in self.myopts or \
6460                                                 "--reinstall" in self.myopts):
6461                                                 iuses = pkg.iuse.all
6462                                                 old_use = pkg.use.enabled
6463                                                 if myeb:
6464                                                         pkgsettings.setcpv(myeb)
6465                                                 else:
6466                                                         pkgsettings.setcpv(pkg)
6467                                                 now_use = pkgsettings["PORTAGE_USE"].split()
6468                                                 forced_flags = set()
6469                                                 forced_flags.update(pkgsettings.useforce)
6470                                                 forced_flags.update(pkgsettings.usemask)
6471                                                 cur_iuse = iuses
6472                                                 if myeb and not usepkgonly:
6473                                                         cur_iuse = myeb.iuse.all
6474                                                 if self._reinstall_for_flags(forced_flags,
6475                                                         old_use, iuses,
6476                                                         now_use, cur_iuse):
6477                                                         break
6478                                         # Compare current config to installed package
6479                                         # and do not reinstall if possible.
6480                                         if not installed and \
6481                                                 ("--newuse" in self.myopts or \
6482                                                 "--reinstall" in self.myopts) and \
6483                                                 cpv in vardb.match(atom):
6484                                                 pkgsettings.setcpv(pkg)
6485                                                 forced_flags = set()
6486                                                 forced_flags.update(pkgsettings.useforce)
6487                                                 forced_flags.update(pkgsettings.usemask)
6488                                                 old_use = vardb.aux_get(cpv, ["USE"])[0].split()
6489                                                 old_iuse = set(filter_iuse_defaults(
6490                                                         vardb.aux_get(cpv, ["IUSE"])[0].split()))
6491                                                 cur_use = pkg.use.enabled
6492                                                 cur_iuse = pkg.iuse.all
6493                                                 reinstall_for_flags = \
6494                                                         self._reinstall_for_flags(
6495                                                         forced_flags, old_use, old_iuse,
6496                                                         cur_use, cur_iuse)
6497                                                 if reinstall_for_flags:
6498                                                         reinstall = True
6499                                         if not built:
6500                                                 myeb = pkg
6501                                         matched_packages.append(pkg)
6502                                         if reinstall_for_flags:
6503                                                 self._reinstall_nodes[pkg] = \
6504                                                         reinstall_for_flags
6505                                         break
6506
6507                 if not matched_packages:
6508                         return None, None
6509
6510                 if "--debug" in self.myopts:
6511                         for pkg in matched_packages:
6512                                 portage.writemsg("%s %s\n" % \
6513                                         ((pkg.type_name + ":").rjust(10), pkg.cpv), noiselevel=-1)
6514
6515                 # Filter out any old-style virtual matches if they are
6516                 # mixed with new-style virtual matches.
6517                 cp = portage.dep_getkey(atom)
6518                 if len(matched_packages) > 1 and \
6519                         "virtual" == portage.catsplit(cp)[0]:
6520                         for pkg in matched_packages:
6521                                 if pkg.cp != cp:
6522                                         continue
6523                                 # Got a new-style virtual, so filter
6524                                 # out any old-style virtuals.
6525                                 matched_packages = [pkg for pkg in matched_packages \
6526                                         if pkg.cp == cp]
6527                                 break
6528
6529                 if len(matched_packages) > 1:
6530                         bestmatch = portage.best(
6531                                 [pkg.cpv for pkg in matched_packages])
6532                         matched_packages = [pkg for pkg in matched_packages \
6533                                 if portage.dep.cpvequal(pkg.cpv, bestmatch)]
6534
6535                 # ordered by type preference ("ebuild" type is the last resort)
6536                 return  matched_packages[-1], existing_node
6537
6538         def _select_pkg_from_graph(self, root, atom, onlydeps=False):
6539                 """
6540                 Select packages that have already been added to the graph or
6541                 those that are installed and have not been scheduled for
6542                 replacement.
6543                 """
6544                 graph_db = self._graph_trees[root]["porttree"].dbapi
6545                 matches = graph_db.match_pkgs(atom)
6546                 if not matches:
6547                         return None, None
6548                 pkg = matches[-1] # highest match
6549                 in_graph = self._slot_pkg_map[root].get(pkg.slot_atom)
6550                 return pkg, in_graph
6551
6552         def _complete_graph(self):
6553                 """
6554                 Add any deep dependencies of required sets (args, system, world) that
6555                 have not been pulled into the graph yet. This ensures that the graph
6556                 is consistent such that initially satisfied deep dependencies are not
6557                 broken in the new graph. Initially unsatisfied dependencies are
6558                 irrelevant since we only want to avoid breaking dependencies that are
6559                 intially satisfied.
6560
6561                 Since this method can consume enough time to disturb users, it is
6562                 currently only enabled by the --complete-graph option.
6563                 """
6564                 if "--buildpkgonly" in self.myopts or \
6565                         "recurse" not in self.myparams:
6566                         return 1
6567
6568                 if "complete" not in self.myparams:
6569                         # Skip this to avoid consuming enough time to disturb users.
6570                         return 1
6571
6572                 # Put the depgraph into a mode that causes it to only
6573                 # select packages that have already been added to the
6574                 # graph or those that are installed and have not been
6575                 # scheduled for replacement. Also, toggle the "deep"
6576                 # parameter so that all dependencies are traversed and
6577                 # accounted for.
6578                 self._select_atoms = self._select_atoms_from_graph
6579                 self._select_package = self._select_pkg_from_graph
6580                 already_deep = "deep" in self.myparams
6581                 if not already_deep:
6582                         self.myparams.add("deep")
6583
6584                 for root in self.roots:
6585                         required_set_names = self._required_set_names.copy()
6586                         if root == self.target_root and \
6587                                 (already_deep or "empty" in self.myparams):
6588                                 required_set_names.difference_update(self._sets)
6589                         if not required_set_names and not self._ignored_deps:
6590                                 continue
6591                         root_config = self.roots[root]
6592                         setconfig = root_config.setconfig
6593                         args = []
6594                         # Reuse existing SetArg instances when available.
6595                         for arg in self.digraph.root_nodes():
6596                                 if not isinstance(arg, SetArg):
6597                                         continue
6598                                 if arg.root_config != root_config:
6599                                         continue
6600                                 if arg.name in required_set_names:
6601                                         args.append(arg)
6602                                         required_set_names.remove(arg.name)
6603                         # Create new SetArg instances only when necessary.
6604                         for s in required_set_names:
6605                                 expanded_set = InternalPackageSet(
6606                                         initial_atoms=setconfig.getSetAtoms(s))
6607                                 atom = SETPREFIX + s
6608                                 args.append(SetArg(arg=atom, set=expanded_set,
6609                                         root_config=root_config))
6610                         vardb = root_config.trees["vartree"].dbapi
6611                         for arg in args:
6612                                 for atom in arg.set:
6613                                         self._dep_stack.append(
6614                                                 Dependency(atom=atom, root=root, parent=arg))
6615                         if self._ignored_deps:
6616                                 self._dep_stack.extend(self._ignored_deps)
6617                                 self._ignored_deps = []
6618                         if not self._create_graph(allow_unsatisfied=True):
6619                                 return 0
6620                         # Check the unsatisfied deps to see if any initially satisfied deps
6621                         # will become unsatisfied due to an upgrade. Initially unsatisfied
6622                         # deps are irrelevant since we only want to avoid breaking deps
6623                         # that are initially satisfied.
6624                         while self._unsatisfied_deps:
6625                                 dep = self._unsatisfied_deps.pop()
6626                                 matches = vardb.match_pkgs(dep.atom)
6627                                 if not matches:
6628                                         self._initially_unsatisfied_deps.append(dep)
6629                                         continue
6630                                 # An scheduled installation broke a deep dependency.
6631                                 # Add the installed package to the graph so that it
6632                                 # will be appropriately reported as a slot collision
6633                                 # (possibly solvable via backtracking).
6634                                 pkg = matches[-1] # highest match
6635                                 if not self._add_pkg(pkg, dep):
6636                                         return 0
6637                                 if not self._create_graph(allow_unsatisfied=True):
6638                                         return 0
6639                 return 1
6640
6641         def _pkg(self, cpv, type_name, root_config, installed=False):
6642                 """
6643                 Get a package instance from the cache, or create a new
6644                 one if necessary. Raises KeyError from aux_get if it
6645                 failures for some reason (package does not exist or is
6646                 corrupt).
6647                 """
6648                 operation = "merge"
6649                 if installed:
6650                         operation = "nomerge"
6651                 pkg = self._pkg_cache.get(
6652                         (type_name, root_config.root, cpv, operation))
6653                 if pkg is None:
6654                         tree_type = self.pkg_tree_map[type_name]
6655                         db = root_config.trees[tree_type].dbapi
6656                         db_keys = list(self._trees_orig[root_config.root][
6657                                 tree_type].dbapi._aux_cache_keys)
6658                         try:
6659                                 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
6660                         except KeyError:
6661                                 raise portage.exception.PackageNotFound(cpv)
6662                         pkg = Package(cpv=cpv, metadata=metadata,
6663                                 root_config=root_config, installed=installed)
6664                         if type_name == "ebuild":
6665                                 settings = self.pkgsettings[root_config.root]
6666                                 settings.setcpv(pkg)
6667                                 pkg.metadata["USE"] = settings["PORTAGE_USE"]
6668                                 pkg.metadata['CHOST'] = settings.get('CHOST', '')
6669                         self._pkg_cache[pkg] = pkg
6670                 return pkg
6671
6672         def validate_blockers(self):
6673                 """Remove any blockers from the digraph that do not match any of the
6674                 packages within the graph.  If necessary, create hard deps to ensure
6675                 correct merge order such that mutually blocking packages are never
6676                 installed simultaneously."""
6677
6678                 if "--buildpkgonly" in self.myopts or \
6679                         "--nodeps" in self.myopts:
6680                         return True
6681
6682                 #if "deep" in self.myparams:
6683                 if True:
6684                         # Pull in blockers from all installed packages that haven't already
6685                         # been pulled into the depgraph.  This is not enabled by default
6686                         # due to the performance penalty that is incurred by all the
6687                         # additional dep_check calls that are required.
6688
6689                         dep_keys = ["DEPEND","RDEPEND","PDEPEND"]
6690                         for myroot in self.trees:
6691                                 vardb = self.trees[myroot]["vartree"].dbapi
6692                                 portdb = self.trees[myroot]["porttree"].dbapi
6693                                 pkgsettings = self.pkgsettings[myroot]
6694                                 final_db = self.mydbapi[myroot]
6695
6696                                 blocker_cache = BlockerCache(myroot, vardb)
6697                                 stale_cache = set(blocker_cache)
6698                                 for pkg in vardb:
6699                                         cpv = pkg.cpv
6700                                         stale_cache.discard(cpv)
6701                                         pkg_in_graph = self.digraph.contains(pkg)
6702
6703                                         # Check for masked installed packages. Only warn about
6704                                         # packages that are in the graph in order to avoid warning
6705                                         # about those that will be automatically uninstalled during
6706                                         # the merge process or by --depclean.
6707                                         if pkg in final_db:
6708                                                 if pkg_in_graph and not visible(pkgsettings, pkg):
6709                                                         self._masked_installed.add(pkg)
6710
6711                                         blocker_atoms = None
6712                                         blockers = None
6713                                         if pkg_in_graph:
6714                                                 blockers = []
6715                                                 try:
6716                                                         blockers.extend(
6717                                                                 self._blocker_parents.child_nodes(pkg))
6718                                                 except KeyError:
6719                                                         pass
6720                                                 try:
6721                                                         blockers.extend(
6722                                                                 self._irrelevant_blockers.child_nodes(pkg))
6723                                                 except KeyError:
6724                                                         pass
6725                                         if blockers is not None:
6726                                                 blockers = set(str(blocker.atom) \
6727                                                         for blocker in blockers)
6728
6729                                         # If this node has any blockers, create a "nomerge"
6730                                         # node for it so that they can be enforced.
6731                                         self.spinner.update()
6732                                         blocker_data = blocker_cache.get(cpv)
6733                                         if blocker_data is not None and \
6734                                                 blocker_data.counter != long(pkg.metadata["COUNTER"]):
6735                                                 blocker_data = None
6736
6737                                         # If blocker data from the graph is available, use
6738                                         # it to validate the cache and update the cache if
6739                                         # it seems invalid.
6740                                         if blocker_data is not None and \
6741                                                 blockers is not None:
6742                                                 if not blockers.symmetric_difference(
6743                                                         blocker_data.atoms):
6744                                                         continue
6745                                                 blocker_data = None
6746
6747                                         if blocker_data is None and \
6748                                                 blockers is not None:
6749                                                 # Re-use the blockers from the graph.
6750                                                 blocker_atoms = sorted(blockers)
6751                                                 counter = long(pkg.metadata["COUNTER"])
6752                                                 blocker_data = \
6753                                                         blocker_cache.BlockerData(counter, blocker_atoms)
6754                                                 blocker_cache[pkg.cpv] = blocker_data
6755                                                 continue
6756
6757                                         if blocker_data:
6758                                                 blocker_atoms = blocker_data.atoms
6759                                         else:
6760                                                 # Use aux_get() to trigger FakeVartree global
6761                                                 # updates on *DEPEND when appropriate.
6762                                                 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
6763                                                 # It is crucial to pass in final_db here in order to
6764                                                 # optimize dep_check calls by eliminating atoms via
6765                                                 # dep_wordreduce and dep_eval calls.
6766                                                 try:
6767                                                         portage.dep._dep_check_strict = False
6768                                                         try:
6769                                                                 success, atoms = portage.dep_check(depstr,
6770                                                                         final_db, pkgsettings, myuse=pkg.use.enabled,
6771                                                                         trees=self._graph_trees, myroot=myroot)
6772                                                         except Exception, e:
6773                                                                 if isinstance(e, SystemExit):
6774                                                                         raise
6775                                                                 # This is helpful, for example, if a ValueError
6776                                                                 # is thrown from cpv_expand due to multiple
6777                                                                 # matches (this can happen if an atom lacks a
6778                                                                 # category).
6779                                                                 show_invalid_depstring_notice(
6780                                                                         pkg, depstr, str(e))
6781                                                                 del e
6782                                                                 raise
6783                                                 finally:
6784                                                         portage.dep._dep_check_strict = True
6785                                                 if not success:
6786                                                         replacement_pkg = final_db.match_pkgs(pkg.slot_atom)
6787                                                         if replacement_pkg and \
6788                                                                 replacement_pkg[0].operation == "merge":
6789                                                                 # This package is being replaced anyway, so
6790                                                                 # ignore invalid dependencies so as not to
6791                                                                 # annoy the user too much (otherwise they'd be
6792                                                                 # forced to manually unmerge it first).
6793                                                                 continue
6794                                                         show_invalid_depstring_notice(pkg, depstr, atoms)
6795                                                         return False
6796                                                 blocker_atoms = [myatom for myatom in atoms \
6797                                                         if myatom.startswith("!")]
6798                                                 blocker_atoms.sort()
6799                                                 counter = long(pkg.metadata["COUNTER"])
6800                                                 blocker_cache[cpv] = \
6801                                                         blocker_cache.BlockerData(counter, blocker_atoms)
6802                                         if blocker_atoms:
6803                                                 try:
6804                                                         for atom in blocker_atoms:
6805                                                                 blocker = Blocker(atom=portage.dep.Atom(atom),
6806                                                                         eapi=pkg.metadata["EAPI"], root=myroot)
6807                                                                 self._blocker_parents.add(blocker, pkg)
6808                                                 except portage.exception.InvalidAtom, e:
6809                                                         depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
6810                                                         show_invalid_depstring_notice(
6811                                                                 pkg, depstr, "Invalid Atom: %s" % (e,))
6812                                                         return False
6813                                 for cpv in stale_cache:
6814                                         del blocker_cache[cpv]
6815                                 blocker_cache.flush()
6816                                 del blocker_cache
6817
6818                 # Discard any "uninstall" tasks scheduled by previous calls
6819                 # to this method, since those tasks may not make sense given
6820                 # the current graph state.
6821                 previous_uninstall_tasks = self._blocker_uninstalls.leaf_nodes()
6822                 if previous_uninstall_tasks:
6823                         self._blocker_uninstalls = digraph()
6824                         self.digraph.difference_update(previous_uninstall_tasks)
6825
6826                 for blocker in self._blocker_parents.leaf_nodes():
6827                         self.spinner.update()
6828                         root_config = self.roots[blocker.root]
6829                         virtuals = root_config.settings.getvirtuals()
6830                         myroot = blocker.root
6831                         initial_db = self.trees[myroot]["vartree"].dbapi
6832                         final_db = self.mydbapi[myroot]
6833                         
6834                         provider_virtual = False
6835                         if blocker.cp in virtuals and \
6836                                 not self._have_new_virt(blocker.root, blocker.cp):
6837                                 provider_virtual = True
6838
6839                         # Use this to check PROVIDE for each matched package
6840                         # when necessary.
6841                         atom_set = InternalPackageSet(
6842                                 initial_atoms=[blocker.atom])
6843
6844                         if provider_virtual:
6845                                 atoms = []
6846                                 for provider_entry in virtuals[blocker.cp]:
6847                                         provider_cp = \
6848                                                 portage.dep_getkey(provider_entry)
6849                                         atoms.append(blocker.atom.replace(
6850                                                 blocker.cp, provider_cp))
6851                         else:
6852                                 atoms = [blocker.atom]
6853
6854                         blocked_initial = set()
6855                         for atom in atoms:
6856                                 for pkg in initial_db.match_pkgs(atom):
6857                                         if atom_set.findAtomForPackage(pkg):
6858                                                 blocked_initial.add(pkg)
6859
6860                         blocked_final = set()
6861                         for atom in atoms:
6862                                 for pkg in final_db.match_pkgs(atom):
6863                                         if atom_set.findAtomForPackage(pkg):
6864                                                 blocked_final.add(pkg)
6865
6866                         if not blocked_initial and not blocked_final:
6867                                 parent_pkgs = self._blocker_parents.parent_nodes(blocker)
6868                                 self._blocker_parents.remove(blocker)
6869                                 # Discard any parents that don't have any more blockers.
6870                                 for pkg in parent_pkgs:
6871                                         self._irrelevant_blockers.add(blocker, pkg)
6872                                         if not self._blocker_parents.child_nodes(pkg):
6873                                                 self._blocker_parents.remove(pkg)
6874                                 continue
6875                         for parent in self._blocker_parents.parent_nodes(blocker):
6876                                 unresolved_blocks = False
6877                                 depends_on_order = set()
6878                                 for pkg in blocked_initial:
6879                                         if pkg.slot_atom == parent.slot_atom:
6880                                                 # TODO: Support blocks within slots in cases where it
6881                                                 # might make sense.  For example, a new version might
6882                                                 # require that the old version be uninstalled at build
6883                                                 # time.
6884                                                 continue
6885                                         if parent.installed:
6886                                                 # Two currently installed packages conflict with
6887                                                 # eachother. Ignore this case since the damage
6888                                                 # is already done and this would be likely to
6889                                                 # confuse users if displayed like a normal blocker.
6890                                                 continue
6891
6892                                         self._blocked_pkgs.add(pkg, blocker)
6893
6894                                         if parent.operation == "merge":
6895                                                 # Maybe the blocked package can be replaced or simply
6896                                                 # unmerged to resolve this block.
6897                                                 depends_on_order.add((pkg, parent))
6898                                                 continue
6899                                         # None of the above blocker resolutions techniques apply,
6900                                         # so apparently this one is unresolvable.
6901                                         unresolved_blocks = True
6902                                 for pkg in blocked_final:
6903                                         if pkg.slot_atom == parent.slot_atom:
6904                                                 # TODO: Support blocks within slots.
6905                                                 continue
6906                                         if parent.operation == "nomerge" and \
6907                                                 pkg.operation == "nomerge":
6908                                                 # This blocker will be handled the next time that a
6909                                                 # merge of either package is triggered.
6910                                                 continue
6911
6912                                         self._blocked_pkgs.add(pkg, blocker)
6913
6914                                         # Maybe the blocking package can be
6915                                         # unmerged to resolve this block.
6916                                         if parent.operation == "merge" and pkg.installed:
6917                                                 depends_on_order.add((pkg, parent))
6918                                                 continue
6919                                         elif parent.operation == "nomerge":
6920                                                 depends_on_order.add((parent, pkg))
6921                                                 continue
6922                                         # None of the above blocker resolutions techniques apply,
6923                                         # so apparently this one is unresolvable.
6924                                         unresolved_blocks = True
6925
6926                                 # Make sure we don't unmerge any package that have been pulled
6927                                 # into the graph.
6928                                 if not unresolved_blocks and depends_on_order:
6929                                         for inst_pkg, inst_task in depends_on_order:
6930                                                 if self.digraph.contains(inst_pkg) and \
6931                                                         self.digraph.parent_nodes(inst_pkg):
6932                                                         unresolved_blocks = True
6933                                                         break
6934
6935                                 if not unresolved_blocks and depends_on_order:
6936                                         for inst_pkg, inst_task in depends_on_order:
6937                                                 uninst_task = Package(built=inst_pkg.built,
6938                                                         cpv=inst_pkg.cpv, installed=inst_pkg.installed,
6939                                                         metadata=inst_pkg.metadata,
6940                                                         operation="uninstall",
6941                                                         root_config=inst_pkg.root_config,
6942                                                         type_name=inst_pkg.type_name)
6943                                                 self._pkg_cache[uninst_task] = uninst_task
6944                                                 # Enforce correct merge order with a hard dep.
6945                                                 self.digraph.addnode(uninst_task, inst_task,
6946                                                         priority=BlockerDepPriority.instance)
6947                                                 # Count references to this blocker so that it can be
6948                                                 # invalidated after nodes referencing it have been
6949                                                 # merged.
6950                                                 self._blocker_uninstalls.addnode(uninst_task, blocker)
6951                                 if not unresolved_blocks and not depends_on_order:
6952                                         self._irrelevant_blockers.add(blocker, parent)
6953                                         self._blocker_parents.remove_edge(blocker, parent)
6954                                         if not self._blocker_parents.parent_nodes(blocker):
6955                                                 self._blocker_parents.remove(blocker)
6956                                         if not self._blocker_parents.child_nodes(parent):
6957                                                 self._blocker_parents.remove(parent)
6958                                 if unresolved_blocks:
6959                                         self._unsolvable_blockers.add(blocker, parent)
6960
6961                 return True
6962
6963         def _accept_blocker_conflicts(self):
6964                 acceptable = False
6965                 for x in ("--buildpkgonly", "--fetchonly",
6966                         "--fetch-all-uri", "--nodeps"):
6967                         if x in self.myopts:
6968                                 acceptable = True
6969                                 break
6970                 return acceptable
6971
6972         def _merge_order_bias(self, mygraph):
6973                 """
6974                 For optimal leaf node selection, promote deep system runtime deps and
6975                 order nodes from highest to lowest overall reference count.
6976                 """
6977
6978                 node_info = {}
6979                 for node in mygraph.order:
6980                         node_info[node] = len(mygraph.parent_nodes(node))
6981                 deep_system_deps = _find_deep_system_runtime_deps(mygraph)
6982
6983                 def cmp_merge_preference(node1, node2):
6984
6985                         if node1.operation == 'uninstall':
6986                                 if node2.operation == 'uninstall':
6987                                         return 0
6988                                 return 1
6989
6990                         if node2.operation == 'uninstall':
6991                                 if node1.operation == 'uninstall':
6992                                         return 0
6993                                 return -1
6994
6995                         node1_sys = node1 in deep_system_deps
6996                         node2_sys = node2 in deep_system_deps
6997                         if node1_sys != node2_sys:
6998                                 if node1_sys:
6999                                         return -1
7000                                 return 1
7001
7002                         return node_info[node2] - node_info[node1]
7003
7004                 mygraph.order.sort(key=cmp_sort_key(cmp_merge_preference))
7005
7006         def altlist(self, reversed=False):
7007
7008                 while self._serialized_tasks_cache is None:
7009                         self._resolve_conflicts()
7010                         try:
7011                                 self._serialized_tasks_cache, self._scheduler_graph = \
7012                                         self._serialize_tasks()
7013                         except self._serialize_tasks_retry:
7014                                 pass
7015
7016                 retlist = self._serialized_tasks_cache[:]
7017                 if reversed:
7018                         retlist.reverse()
7019                 return retlist
7020
7021         def schedulerGraph(self):
7022                 """
7023                 The scheduler graph is identical to the normal one except that
7024                 uninstall edges are reversed in specific cases that require
7025                 conflicting packages to be temporarily installed simultaneously.
7026                 This is intended for use by the Scheduler in it's parallelization
7027                 logic. It ensures that temporary simultaneous installation of
7028                 conflicting packages is avoided when appropriate (especially for
7029                 !!atom blockers), but allowed in specific cases that require it.
7030
7031                 Note that this method calls break_refs() which alters the state of
7032                 internal Package instances such that this depgraph instance should
7033                 not be used to perform any more calculations.
7034                 """
7035                 if self._scheduler_graph is None:
7036                         self.altlist()
7037                 self.break_refs(self._scheduler_graph.order)
7038                 return self._scheduler_graph
7039
7040         def break_refs(self, nodes):
7041                 """
7042                 Take a mergelist like that returned from self.altlist() and
7043                 break any references that lead back to the depgraph. This is
7044                 useful if you want to hold references to packages without
7045                 also holding the depgraph on the heap.
7046                 """
7047                 for node in nodes:
7048                         if hasattr(node, "root_config"):
7049                                 # The FakeVartree references the _package_cache which
7050                                 # references the depgraph. So that Package instances don't
7051                                 # hold the depgraph and FakeVartree on the heap, replace
7052                                 # the RootConfig that references the FakeVartree with the
7053                                 # original RootConfig instance which references the actual
7054                                 # vartree.
7055                                 node.root_config = \
7056                                         self._trees_orig[node.root_config.root]["root_config"]
7057
7058         def _resolve_conflicts(self):
7059                 if not self._complete_graph():
7060                         raise self._unknown_internal_error()
7061
7062                 if not self.validate_blockers():
7063                         raise self._unknown_internal_error()
7064
7065                 if self._slot_collision_info:
7066                         self._process_slot_conflicts()
7067
7068         def _serialize_tasks(self):
7069
7070                 if "--debug" in self.myopts:
7071                         writemsg("\ndigraph:\n\n", noiselevel=-1)
7072                         self.digraph.debug_print()
7073                         writemsg("\n", noiselevel=-1)
7074
7075                 scheduler_graph = self.digraph.copy()
7076
7077                 if '--nodeps' in self.myopts:
7078                         # Preserve the package order given on the command line.
7079                         return ([node for node in scheduler_graph \
7080                                 if isinstance(node, Package) \
7081                                 and node.operation == 'merge'], scheduler_graph)
7082
7083                 mygraph=self.digraph.copy()
7084                 # Prune "nomerge" root nodes if nothing depends on them, since
7085                 # otherwise they slow down merge order calculation. Don't remove
7086                 # non-root nodes since they help optimize merge order in some cases
7087                 # such as revdep-rebuild.
7088                 removed_nodes = set()
7089                 while True:
7090                         for node in mygraph.root_nodes():
7091                                 if not isinstance(node, Package) or \
7092                                         node.installed or node.onlydeps:
7093                                         removed_nodes.add(node)
7094                         if removed_nodes:
7095                                 self.spinner.update()
7096                                 mygraph.difference_update(removed_nodes)
7097                         if not removed_nodes:
7098                                 break
7099                         removed_nodes.clear()
7100                 self._merge_order_bias(mygraph)
7101                 def cmp_circular_bias(n1, n2):
7102                         """
7103                         RDEPEND is stronger than PDEPEND and this function
7104                         measures such a strength bias within a circular
7105                         dependency relationship.
7106                         """
7107                         n1_n2_medium = n2 in mygraph.child_nodes(n1,
7108                                 ignore_priority=priority_range.ignore_medium_soft)
7109                         n2_n1_medium = n1 in mygraph.child_nodes(n2,
7110                                 ignore_priority=priority_range.ignore_medium_soft)
7111                         if n1_n2_medium == n2_n1_medium:
7112                                 return 0
7113                         elif n1_n2_medium:
7114                                 return 1
7115                         return -1
7116                 myblocker_uninstalls = self._blocker_uninstalls.copy()
7117                 retlist=[]
7118                 # Contains uninstall tasks that have been scheduled to
7119                 # occur after overlapping blockers have been installed.
7120                 scheduled_uninstalls = set()
7121                 # Contains any Uninstall tasks that have been ignored
7122                 # in order to avoid the circular deps code path. These
7123                 # correspond to blocker conflicts that could not be
7124                 # resolved.
7125                 ignored_uninstall_tasks = set()
7126                 have_uninstall_task = False
7127                 complete = "complete" in self.myparams
7128                 asap_nodes = []
7129
7130                 def get_nodes(**kwargs):
7131                         """
7132                         Returns leaf nodes excluding Uninstall instances
7133                         since those should be executed as late as possible.
7134                         """
7135                         return [node for node in mygraph.leaf_nodes(**kwargs) \
7136                                 if isinstance(node, Package) and \
7137                                         (node.operation != "uninstall" or \
7138                                         node in scheduled_uninstalls)]
7139
7140                 # sys-apps/portage needs special treatment if ROOT="/"
7141                 running_root = self._running_root.root
7142                 from portage.const import PORTAGE_PACKAGE_ATOM
7143                 runtime_deps = InternalPackageSet(
7144                         initial_atoms=[PORTAGE_PACKAGE_ATOM])
7145                 running_portage = self.trees[running_root]["vartree"].dbapi.match_pkgs(
7146                         PORTAGE_PACKAGE_ATOM)
7147                 replacement_portage = self.mydbapi[running_root].match_pkgs(
7148                         PORTAGE_PACKAGE_ATOM)
7149
7150                 if running_portage:
7151                         running_portage = running_portage[0]
7152                 else:
7153                         running_portage = None
7154
7155                 if replacement_portage:
7156                         replacement_portage = replacement_portage[0]
7157                 else:
7158                         replacement_portage = None
7159
7160                 if replacement_portage == running_portage:
7161                         replacement_portage = None
7162
7163                 if replacement_portage is not None:
7164                         # update from running_portage to replacement_portage asap
7165                         asap_nodes.append(replacement_portage)
7166
7167                 if running_portage is not None:
7168                         try:
7169                                 portage_rdepend = self._select_atoms_highest_available(
7170                                         running_root, running_portage.metadata["RDEPEND"],
7171                                         myuse=running_portage.use.enabled,
7172                                         parent=running_portage, strict=False)
7173                         except portage.exception.InvalidDependString, e:
7174                                 portage.writemsg("!!! Invalid RDEPEND in " + \
7175                                         "'%svar/db/pkg/%s/RDEPEND': %s\n" % \
7176                                         (running_root, running_portage.cpv, e), noiselevel=-1)
7177                                 del e
7178                                 portage_rdepend = []
7179                         runtime_deps.update(atom for atom in portage_rdepend \
7180                                 if not atom.startswith("!"))
7181
7182                 def gather_deps(ignore_priority, mergeable_nodes,
7183                         selected_nodes, node):
7184                         """
7185                         Recursively gather a group of nodes that RDEPEND on
7186                         eachother. This ensures that they are merged as a group
7187                         and get their RDEPENDs satisfied as soon as possible.
7188                         """
7189                         if node in selected_nodes:
7190                                 return True
7191                         if node not in mergeable_nodes:
7192                                 return False
7193                         if node == replacement_portage and \
7194                                 mygraph.child_nodes(node,
7195                                 ignore_priority=priority_range.ignore_medium_soft):
7196                                 # Make sure that portage always has all of it's
7197                                 # RDEPENDs installed first.
7198                                 return False
7199                         selected_nodes.add(node)
7200                         for child in mygraph.child_nodes(node,
7201                                 ignore_priority=ignore_priority):
7202                                 if not gather_deps(ignore_priority,
7203                                         mergeable_nodes, selected_nodes, child):
7204                                         return False
7205                         return True
7206
7207                 def ignore_uninst_or_med(priority):
7208                         if priority is BlockerDepPriority.instance:
7209                                 return True
7210                         return priority_range.ignore_medium(priority)
7211
7212                 def ignore_uninst_or_med_soft(priority):
7213                         if priority is BlockerDepPriority.instance:
7214                                 return True
7215                         return priority_range.ignore_medium_soft(priority)
7216
7217                 tree_mode = "--tree" in self.myopts
7218                 # Tracks whether or not the current iteration should prefer asap_nodes
7219                 # if available.  This is set to False when the previous iteration
7220                 # failed to select any nodes.  It is reset whenever nodes are
7221                 # successfully selected.
7222                 prefer_asap = True
7223
7224                 # Controls whether or not the current iteration should drop edges that
7225                 # are "satisfied" by installed packages, in order to solve circular
7226                 # dependencies. The deep runtime dependencies of installed packages are
7227                 # not checked in this case (bug #199856), so it must be avoided
7228                 # whenever possible.
7229                 drop_satisfied = False
7230
7231                 # State of variables for successive iterations that loosen the
7232                 # criteria for node selection.
7233                 #
7234                 # iteration   prefer_asap   drop_satisfied
7235                 # 1           True          False
7236                 # 2           False         False
7237                 # 3           False         True
7238                 #
7239                 # If no nodes are selected on the last iteration, it is due to
7240                 # unresolved blockers or circular dependencies.
7241
7242                 while not mygraph.empty():
7243                         self.spinner.update()
7244                         selected_nodes = None
7245                         ignore_priority = None
7246                         if drop_satisfied or (prefer_asap and asap_nodes):
7247                                 priority_range = DepPrioritySatisfiedRange
7248                         else:
7249                                 priority_range = DepPriorityNormalRange
7250                         if prefer_asap and asap_nodes:
7251                                 # ASAP nodes are merged before their soft deps. Go ahead and
7252                                 # select root nodes here if necessary, since it's typical for
7253                                 # the parent to have been removed from the graph already.
7254                                 asap_nodes = [node for node in asap_nodes \
7255                                         if mygraph.contains(node)]
7256                                 for node in asap_nodes:
7257                                         if not mygraph.child_nodes(node,
7258                                                 ignore_priority=priority_range.ignore_soft):
7259                                                 selected_nodes = [node]
7260                                                 asap_nodes.remove(node)
7261                                                 break
7262                         if not selected_nodes and \
7263                                 not (prefer_asap and asap_nodes):
7264                                 for i in xrange(priority_range.NONE,
7265                                         priority_range.MEDIUM_SOFT + 1):
7266                                         ignore_priority = priority_range.ignore_priority[i]
7267                                         nodes = get_nodes(ignore_priority=ignore_priority)
7268                                         if nodes:
7269                                                 # If there is a mix of uninstall nodes with other
7270                                                 # types, save the uninstall nodes for later since
7271                                                 # sometimes a merge node will render an uninstall
7272                                                 # node unnecessary (due to occupying the same slot),
7273                                                 # and we want to avoid executing a separate uninstall
7274                                                 # task in that case.
7275                                                 if len(nodes) > 1:
7276                                                         good_uninstalls = []
7277                                                         with_some_uninstalls_excluded = []
7278                                                         for node in nodes:
7279                                                                 if node.operation == "uninstall":
7280                                                                         slot_node = self.mydbapi[node.root
7281                                                                                 ].match_pkgs(node.slot_atom)
7282                                                                         if slot_node and \
7283                                                                                 slot_node[0].operation == "merge":
7284                                                                                 continue
7285                                                                         good_uninstalls.append(node)
7286                                                                 with_some_uninstalls_excluded.append(node)
7287                                                         if good_uninstalls:
7288                                                                 nodes = good_uninstalls
7289                                                         elif with_some_uninstalls_excluded:
7290                                                                 nodes = with_some_uninstalls_excluded
7291                                                         else:
7292                                                                 nodes = nodes
7293
7294                                                 if ignore_priority is None and not tree_mode:
7295                                                         # Greedily pop all of these nodes since no
7296                                                         # relationship has been ignored. This optimization
7297                                                         # destroys --tree output, so it's disabled in tree
7298                                                         # mode.
7299                                                         selected_nodes = nodes
7300                                                 else:
7301                                                         # For optimal merge order:
7302                                                         #  * Only pop one node.
7303                                                         #  * Removing a root node (node without a parent)
7304                                                         #    will not produce a leaf node, so avoid it.
7305                                                         #  * It's normal for a selected uninstall to be a
7306                                                         #    root node, so don't check them for parents.
7307                                                         for node in nodes:
7308                                                                 if node.operation == "uninstall" or \
7309                                                                         mygraph.parent_nodes(node):
7310                                                                         selected_nodes = [node]
7311                                                                         break
7312
7313                                                 if selected_nodes:
7314                                                         break
7315
7316                         if not selected_nodes:
7317                                 nodes = get_nodes(ignore_priority=priority_range.ignore_medium)
7318                                 if nodes:
7319                                         mergeable_nodes = set(nodes)
7320                                         if prefer_asap and asap_nodes:
7321                                                 nodes = asap_nodes
7322                                         for i in xrange(priority_range.SOFT,
7323                                                 priority_range.MEDIUM_SOFT + 1):
7324                                                 ignore_priority = priority_range.ignore_priority[i]
7325                                                 for node in nodes:
7326                                                         if not mygraph.parent_nodes(node):
7327                                                                 continue
7328                                                         selected_nodes = set()
7329                                                         if gather_deps(ignore_priority,
7330                                                                 mergeable_nodes, selected_nodes, node):
7331                                                                 break
7332                                                         else:
7333                                                                 selected_nodes = None
7334                                                 if selected_nodes:
7335                                                         break
7336
7337                                         if prefer_asap and asap_nodes and not selected_nodes:
7338                                                 # We failed to find any asap nodes to merge, so ignore
7339                                                 # them for the next iteration.
7340                                                 prefer_asap = False
7341                                                 continue
7342
7343                         if selected_nodes and ignore_priority is not None:
7344                                 # Try to merge ignored medium_soft deps as soon as possible
7345                                 # if they're not satisfied by installed packages.
7346                                 for node in selected_nodes:
7347                                         children = set(mygraph.child_nodes(node))
7348                                         soft = children.difference(
7349                                                 mygraph.child_nodes(node,
7350                                                 ignore_priority=DepPrioritySatisfiedRange.ignore_soft))
7351                                         medium_soft = children.difference(
7352                                                 mygraph.child_nodes(node,
7353                                                         ignore_priority = \
7354                                                         DepPrioritySatisfiedRange.ignore_medium_soft))
7355                                         medium_soft.difference_update(soft)
7356                                         for child in medium_soft:
7357                                                 if child in selected_nodes:
7358                                                         continue
7359                                                 if child in asap_nodes:
7360                                                         continue
7361                                                 asap_nodes.append(child)
7362
7363                         if selected_nodes and len(selected_nodes) > 1:
7364                                 if not isinstance(selected_nodes, list):
7365                                         selected_nodes = list(selected_nodes)
7366                                 selected_nodes.sort(key=cmp_sort_key(cmp_circular_bias))
7367
7368                         if not selected_nodes and not myblocker_uninstalls.is_empty():
7369                                 # An Uninstall task needs to be executed in order to
7370                                 # avoid conflict if possible.
7371
7372                                 if drop_satisfied:
7373                                         priority_range = DepPrioritySatisfiedRange
7374                                 else:
7375                                         priority_range = DepPriorityNormalRange
7376
7377                                 mergeable_nodes = get_nodes(
7378                                         ignore_priority=ignore_uninst_or_med)
7379
7380                                 min_parent_deps = None
7381                                 uninst_task = None
7382                                 for task in myblocker_uninstalls.leaf_nodes():
7383                                         # Do some sanity checks so that system or world packages
7384                                         # don't get uninstalled inappropriately here (only really
7385                                         # necessary when --complete-graph has not been enabled).
7386
7387                                         if task in ignored_uninstall_tasks:
7388                                                 continue
7389
7390                                         if task in scheduled_uninstalls:
7391                                                 # It's been scheduled but it hasn't
7392                                                 # been executed yet due to dependence
7393                                                 # on installation of blocking packages.
7394                                                 continue
7395
7396                                         root_config = self.roots[task.root]
7397                                         inst_pkg = self._pkg_cache[
7398                                                 ("installed", task.root, task.cpv, "nomerge")]
7399
7400                                         if self.digraph.contains(inst_pkg):
7401                                                 continue
7402
7403                                         forbid_overlap = False
7404                                         heuristic_overlap = False
7405                                         for blocker in myblocker_uninstalls.parent_nodes(task):
7406                                                 if blocker.eapi in ("0", "1"):
7407                                                         heuristic_overlap = True
7408                                                 elif blocker.atom.blocker.overlap.forbid:
7409                                                         forbid_overlap = True
7410                                                         break
7411                                         if forbid_overlap and running_root == task.root:
7412                                                 continue
7413
7414                                         if heuristic_overlap and running_root == task.root:
7415                                                 # Never uninstall sys-apps/portage or it's essential
7416                                                 # dependencies, except through replacement.
7417                                                 try:
7418                                                         runtime_dep_atoms = \
7419                                                                 list(runtime_deps.iterAtomsForPackage(task))
7420                                                 except portage.exception.InvalidDependString, e:
7421                                                         portage.writemsg("!!! Invalid PROVIDE in " + \
7422                                                                 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
7423                                                                 (task.root, task.cpv, e), noiselevel=-1)
7424                                                         del e
7425                                                         continue
7426
7427                                                 # Don't uninstall a runtime dep if it appears
7428                                                 # to be the only suitable one installed.
7429                                                 skip = False
7430                                                 vardb = root_config.trees["vartree"].dbapi
7431                                                 for atom in runtime_dep_atoms:
7432                                                         other_version = None
7433                                                         for pkg in vardb.match_pkgs(atom):
7434                                                                 if pkg.cpv == task.cpv and \
7435                                                                         pkg.metadata["COUNTER"] == \
7436                                                                         task.metadata["COUNTER"]:
7437                                                                         continue
7438                                                                 other_version = pkg
7439                                                                 break
7440                                                         if other_version is None:
7441                                                                 skip = True
7442                                                                 break
7443                                                 if skip:
7444                                                         continue
7445
7446                                                 # For packages in the system set, don't take
7447                                                 # any chances. If the conflict can't be resolved
7448                                                 # by a normal replacement operation then abort.
7449                                                 skip = False
7450                                                 try:
7451                                                         for atom in root_config.sets[
7452                                                                 "system"].iterAtomsForPackage(task):
7453                                                                 skip = True
7454                                                                 break
7455                                                 except portage.exception.InvalidDependString, e:
7456                                                         portage.writemsg("!!! Invalid PROVIDE in " + \
7457                                                                 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
7458                                                                 (task.root, task.cpv, e), noiselevel=-1)
7459                                                         del e
7460                                                         skip = True
7461                                                 if skip:
7462                                                         continue
7463
7464                                         # Note that the world check isn't always
7465                                         # necessary since self._complete_graph() will
7466                                         # add all packages from the system and world sets to the
7467                                         # graph. This just allows unresolved conflicts to be
7468                                         # detected as early as possible, which makes it possible
7469                                         # to avoid calling self._complete_graph() when it is
7470                                         # unnecessary due to blockers triggering an abortion.
7471                                         if not complete:
7472                                                 # For packages in the world set, go ahead an uninstall
7473                                                 # when necessary, as long as the atom will be satisfied
7474                                                 # in the final state.
7475                                                 graph_db = self.mydbapi[task.root]
7476                                                 skip = False
7477                                                 try:
7478                                                         for atom in root_config.sets[
7479                                                                 "world"].iterAtomsForPackage(task):
7480                                                                 satisfied = False
7481                                                                 for pkg in graph_db.match_pkgs(atom):
7482                                                                         if pkg == inst_pkg:
7483                                                                                 continue
7484                                                                         satisfied = True
7485                                                                         break
7486                                                                 if not satisfied:
7487                                                                         skip = True
7488                                                                         self._blocked_world_pkgs[inst_pkg] = atom
7489                                                                         break
7490                                                 except portage.exception.InvalidDependString, e:
7491                                                         portage.writemsg("!!! Invalid PROVIDE in " + \
7492                                                                 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
7493                                                                 (task.root, task.cpv, e), noiselevel=-1)
7494                                                         del e
7495                                                         skip = True
7496                                                 if skip:
7497                                                         continue
7498
7499                                         # Check the deps of parent nodes to ensure that
7500                                         # the chosen task produces a leaf node. Maybe
7501                                         # this can be optimized some more to make the
7502                                         # best possible choice, but the current algorithm
7503                                         # is simple and should be near optimal for most
7504                                         # common cases.
7505                                         mergeable_parent = False
7506                                         parent_deps = set()
7507                                         for parent in mygraph.parent_nodes(task):
7508                                                 parent_deps.update(mygraph.child_nodes(parent,
7509                                                         ignore_priority=priority_range.ignore_medium_soft))
7510                                                 if parent in mergeable_nodes and \
7511                                                         gather_deps(ignore_uninst_or_med_soft,
7512                                                         mergeable_nodes, set(), parent):
7513                                                         mergeable_parent = True
7514
7515                                         if not mergeable_parent:
7516                                                 continue
7517
7518                                         parent_deps.remove(task)
7519                                         if min_parent_deps is None or \
7520                                                 len(parent_deps) < min_parent_deps:
7521                                                 min_parent_deps = len(parent_deps)
7522                                                 uninst_task = task
7523
7524                                 if uninst_task is not None:
7525                                         # The uninstall is performed only after blocking
7526                                         # packages have been merged on top of it. File
7527                                         # collisions between blocking packages are detected
7528                                         # and removed from the list of files to be uninstalled.
7529                                         scheduled_uninstalls.add(uninst_task)
7530                                         parent_nodes = mygraph.parent_nodes(uninst_task)
7531
7532                                         # Reverse the parent -> uninstall edges since we want
7533                                         # to do the uninstall after blocking packages have
7534                                         # been merged on top of it.
7535                                         mygraph.remove(uninst_task)
7536                                         for blocked_pkg in parent_nodes:
7537                                                 mygraph.add(blocked_pkg, uninst_task,
7538                                                         priority=BlockerDepPriority.instance)
7539                                                 scheduler_graph.remove_edge(uninst_task, blocked_pkg)
7540                                                 scheduler_graph.add(blocked_pkg, uninst_task,
7541                                                         priority=BlockerDepPriority.instance)
7542
7543                                         # Reset the state variables for leaf node selection and
7544                                         # continue trying to select leaf nodes.
7545                                         prefer_asap = True
7546                                         drop_satisfied = False
7547                                         continue
7548
7549                         if not selected_nodes:
7550                                 # Only select root nodes as a last resort. This case should
7551                                 # only trigger when the graph is nearly empty and the only
7552                                 # remaining nodes are isolated (no parents or children). Since
7553                                 # the nodes must be isolated, ignore_priority is not needed.
7554                                 selected_nodes = get_nodes()
7555
7556                         if not selected_nodes and not drop_satisfied:
7557                                 drop_satisfied = True
7558                                 continue
7559
7560                         if not selected_nodes and not myblocker_uninstalls.is_empty():
7561                                 # If possible, drop an uninstall task here in order to avoid
7562                                 # the circular deps code path. The corresponding blocker will
7563                                 # still be counted as an unresolved conflict.
7564                                 uninst_task = None
7565                                 for node in myblocker_uninstalls.leaf_nodes():
7566                                         try:
7567                                                 mygraph.remove(node)
7568                                         except KeyError:
7569                                                 pass
7570                                         else:
7571                                                 uninst_task = node
7572                                                 ignored_uninstall_tasks.add(node)
7573                                                 break
7574
7575                                 if uninst_task is not None:
7576                                         # Reset the state variables for leaf node selection and
7577                                         # continue trying to select leaf nodes.
7578                                         prefer_asap = True
7579                                         drop_satisfied = False
7580                                         continue
7581
7582                         if not selected_nodes:
7583                                 self._circular_deps_for_display = mygraph
7584                                 raise self._unknown_internal_error()
7585
7586                         # At this point, we've succeeded in selecting one or more nodes, so
7587                         # reset state variables for leaf node selection.
7588                         prefer_asap = True
7589                         drop_satisfied = False
7590
7591                         mygraph.difference_update(selected_nodes)
7592
7593                         for node in selected_nodes:
7594                                 if isinstance(node, Package) and \
7595                                         node.operation == "nomerge":
7596                                         continue
7597
7598                                 # Handle interactions between blockers
7599                                 # and uninstallation tasks.
7600                                 solved_blockers = set()
7601                                 uninst_task = None
7602                                 if isinstance(node, Package) and \
7603                                         "uninstall" == node.operation:
7604                                         have_uninstall_task = True
7605                                         uninst_task = node
7606                                 else:
7607                                         vardb = self.trees[node.root]["vartree"].dbapi
7608                                         previous_cpv = vardb.match(node.slot_atom)
7609                                         if previous_cpv:
7610                                                 # The package will be replaced by this one, so remove
7611                                                 # the corresponding Uninstall task if necessary.
7612                                                 previous_cpv = previous_cpv[0]
7613                                                 uninst_task = \
7614                                                         ("installed", node.root, previous_cpv, "uninstall")
7615                                                 try:
7616                                                         mygraph.remove(uninst_task)
7617                                                 except KeyError:
7618                                                         pass
7619
7620                                 if uninst_task is not None and \
7621                                         uninst_task not in ignored_uninstall_tasks and \
7622                                         myblocker_uninstalls.contains(uninst_task):
7623                                         blocker_nodes = myblocker_uninstalls.parent_nodes(uninst_task)
7624                                         myblocker_uninstalls.remove(uninst_task)
7625                                         # Discard any blockers that this Uninstall solves.
7626                                         for blocker in blocker_nodes:
7627                                                 if not myblocker_uninstalls.child_nodes(blocker):
7628                                                         myblocker_uninstalls.remove(blocker)
7629                                                         solved_blockers.add(blocker)
7630
7631                                 retlist.append(node)
7632
7633                                 if (isinstance(node, Package) and \
7634                                         "uninstall" == node.operation) or \
7635                                         (uninst_task is not None and \
7636                                         uninst_task in scheduled_uninstalls):
7637                                         # Include satisfied blockers in the merge list
7638                                         # since the user might be interested and also
7639                                         # it serves as an indicator that blocking packages
7640                                         # will be temporarily installed simultaneously.
7641                                         for blocker in solved_blockers:
7642                                                 retlist.append(Blocker(atom=blocker.atom,
7643                                                         root=blocker.root, eapi=blocker.eapi,
7644                                                         satisfied=True))
7645
7646                 unsolvable_blockers = set(self._unsolvable_blockers.leaf_nodes())
7647                 for node in myblocker_uninstalls.root_nodes():
7648                         unsolvable_blockers.add(node)
7649
7650                 for blocker in unsolvable_blockers:
7651                         retlist.append(blocker)
7652
7653                 # If any Uninstall tasks need to be executed in order
7654                 # to avoid a conflict, complete the graph with any
7655                 # dependencies that may have been initially
7656                 # neglected (to ensure that unsafe Uninstall tasks
7657                 # are properly identified and blocked from execution).
7658                 if have_uninstall_task and \
7659                         not complete and \
7660                         not unsolvable_blockers:
7661                         self.myparams.add("complete")
7662                         raise self._serialize_tasks_retry("")
7663
7664                 if unsolvable_blockers and \
7665                         not self._accept_blocker_conflicts():
7666                         self._unsatisfied_blockers_for_display = unsolvable_blockers
7667                         self._serialized_tasks_cache = retlist[:]
7668                         self._scheduler_graph = scheduler_graph
7669                         raise self._unknown_internal_error()
7670
7671                 if self._slot_collision_info and \
7672                         not self._accept_blocker_conflicts():
7673                         self._serialized_tasks_cache = retlist[:]
7674                         self._scheduler_graph = scheduler_graph
7675                         raise self._unknown_internal_error()
7676
7677                 return retlist, scheduler_graph
7678
7679         def _show_circular_deps(self, mygraph):
7680                 # No leaf nodes are available, so we have a circular
7681                 # dependency panic situation.  Reduce the noise level to a
7682                 # minimum via repeated elimination of root nodes since they
7683                 # have no parents and thus can not be part of a cycle.
7684                 while True:
7685                         root_nodes = mygraph.root_nodes(
7686                                 ignore_priority=DepPrioritySatisfiedRange.ignore_medium_soft)
7687                         if not root_nodes:
7688                                 break
7689                         mygraph.difference_update(root_nodes)
7690                 # Display the USE flags that are enabled on nodes that are part
7691                 # of dependency cycles in case that helps the user decide to
7692                 # disable some of them.
7693                 display_order = []
7694                 tempgraph = mygraph.copy()
7695                 while not tempgraph.empty():
7696                         nodes = tempgraph.leaf_nodes()
7697                         if not nodes:
7698                                 node = tempgraph.order[0]
7699                         else:
7700                                 node = nodes[0]
7701                         display_order.append(node)
7702                         tempgraph.remove(node)
7703                 display_order.reverse()
7704                 self.myopts.pop("--quiet", None)
7705                 self.myopts.pop("--verbose", None)
7706                 self.myopts["--tree"] = True
7707                 portage.writemsg("\n\n", noiselevel=-1)
7708                 self.display(display_order)
7709                 prefix = colorize("BAD", " * ")
7710                 portage.writemsg("\n", noiselevel=-1)
7711                 portage.writemsg(prefix + "Error: circular dependencies:\n",
7712                         noiselevel=-1)
7713                 portage.writemsg("\n", noiselevel=-1)
7714                 mygraph.debug_print()
7715                 portage.writemsg("\n", noiselevel=-1)
7716                 portage.writemsg(prefix + "Note that circular dependencies " + \
7717                         "can often be avoided by temporarily\n", noiselevel=-1)
7718                 portage.writemsg(prefix + "disabling USE flags that trigger " + \
7719                         "optional dependencies.\n", noiselevel=-1)
7720
7721         def _show_merge_list(self):
7722                 if self._serialized_tasks_cache is not None and \
7723                         not (self._displayed_list and \
7724                         (self._displayed_list == self._serialized_tasks_cache or \
7725                         self._displayed_list == \
7726                                 list(reversed(self._serialized_tasks_cache)))):
7727                         display_list = self._serialized_tasks_cache[:]
7728                         if "--tree" in self.myopts:
7729                                 display_list.reverse()
7730                         self.display(display_list)
7731
7732         def _show_unsatisfied_blockers(self, blockers):
7733                 self._show_merge_list()
7734                 msg = "Error: The above package list contains " + \
7735                         "packages which cannot be installed " + \
7736                         "at the same time on the same system."
7737                 prefix = colorize("BAD", " * ")
7738                 from textwrap import wrap
7739                 portage.writemsg("\n", noiselevel=-1)
7740                 for line in wrap(msg, 70):
7741                         portage.writemsg(prefix + line + "\n", noiselevel=-1)
7742
7743                 # Display the conflicting packages along with the packages
7744                 # that pulled them in. This is helpful for troubleshooting
7745                 # cases in which blockers don't solve automatically and
7746                 # the reasons are not apparent from the normal merge list
7747                 # display.
7748
7749                 conflict_pkgs = {}
7750                 for blocker in blockers:
7751                         for pkg in chain(self._blocked_pkgs.child_nodes(blocker), \
7752                                 self._blocker_parents.parent_nodes(blocker)):
7753                                 parent_atoms = self._parent_atoms.get(pkg)
7754                                 if not parent_atoms:
7755                                         atom = self._blocked_world_pkgs.get(pkg)
7756                                         if atom is not None:
7757                                                 parent_atoms = set([("@world", atom)])
7758                                 if parent_atoms:
7759                                         conflict_pkgs[pkg] = parent_atoms
7760
7761                 if conflict_pkgs:
7762                         # Reduce noise by pruning packages that are only
7763                         # pulled in by other conflict packages.
7764                         pruned_pkgs = set()
7765                         for pkg, parent_atoms in conflict_pkgs.iteritems():
7766                                 relevant_parent = False
7767                                 for parent, atom in parent_atoms:
7768                                         if parent not in conflict_pkgs:
7769                                                 relevant_parent = True
7770                                                 break
7771                                 if not relevant_parent:
7772                                         pruned_pkgs.add(pkg)
7773                         for pkg in pruned_pkgs:
7774                                 del conflict_pkgs[pkg]
7775
7776                 if conflict_pkgs:
7777                         msg = []
7778                         msg.append("\n")
7779                         indent = "  "
7780                         # Max number of parents shown, to avoid flooding the display.
7781                         max_parents = 3
7782                         for pkg, parent_atoms in conflict_pkgs.iteritems():
7783
7784                                 pruned_list = set()
7785
7786                                 # Prefer packages that are not directly involved in a conflict.
7787                                 for parent_atom in parent_atoms:
7788                                         if len(pruned_list) >= max_parents:
7789                                                 break
7790                                         parent, atom = parent_atom
7791                                         if parent not in conflict_pkgs:
7792                                                 pruned_list.add(parent_atom)
7793
7794                                 for parent_atom in parent_atoms:
7795                                         if len(pruned_list) >= max_parents:
7796                                                 break
7797                                         pruned_list.add(parent_atom)
7798
7799                                 omitted_parents = len(parent_atoms) - len(pruned_list)
7800                                 msg.append(indent + "%s pulled in by\n" % pkg)
7801
7802                                 for parent_atom in pruned_list:
7803                                         parent, atom = parent_atom
7804                                         msg.append(2*indent)
7805                                         if isinstance(parent,
7806                                                 (PackageArg, AtomArg)):
7807                                                 # For PackageArg and AtomArg types, it's
7808                                                 # redundant to display the atom attribute.
7809                                                 msg.append(str(parent))
7810                                         else:
7811                                                 # Display the specific atom from SetArg or
7812                                                 # Package types.
7813                                                 msg.append("%s required by %s" % (atom, parent))
7814                                         msg.append("\n")
7815
7816                                 if omitted_parents:
7817                                         msg.append(2*indent)
7818                                         msg.append("(and %d more)\n" % omitted_parents)
7819
7820                                 msg.append("\n")
7821
7822                         sys.stderr.write("".join(msg))
7823                         sys.stderr.flush()
7824
7825                 if "--quiet" not in self.myopts:
7826                         show_blocker_docs_link()
7827
7828         def display(self, mylist, favorites=[], verbosity=None):
7829
7830                 # This is used to prevent display_problems() from
7831                 # redundantly displaying this exact same merge list
7832                 # again via _show_merge_list().
7833                 self._displayed_list = mylist
7834
7835                 if verbosity is None:
7836                         verbosity = ("--quiet" in self.myopts and 1 or \
7837                                 "--verbose" in self.myopts and 3 or 2)
7838                 favorites_set = InternalPackageSet(favorites)
7839                 oneshot = "--oneshot" in self.myopts or \
7840                         "--onlydeps" in self.myopts
7841                 columns = "--columns" in self.myopts
7842                 changelogs=[]
7843                 p=[]
7844                 blockers = []
7845
7846                 counters = PackageCounters()
7847
7848                 if verbosity == 1 and "--verbose" not in self.myopts:
7849                         def create_use_string(*args):
7850                                 return ""
7851                 else:
7852                         def create_use_string(name, cur_iuse, iuse_forced, cur_use,
7853                                 old_iuse, old_use,
7854                                 is_new, reinst_flags,
7855                                 all_flags=(verbosity == 3 or "--quiet" in self.myopts),
7856                                 alphabetical=("--alphabetical" in self.myopts)):
7857                                 enabled = []
7858                                 if alphabetical:
7859                                         disabled = enabled
7860                                         removed = enabled
7861                                 else:
7862                                         disabled = []
7863                                         removed = []
7864                                 cur_iuse = set(cur_iuse)
7865                                 enabled_flags = cur_iuse.intersection(cur_use)
7866                                 removed_iuse = set(old_iuse).difference(cur_iuse)
7867                                 any_iuse = cur_iuse.union(old_iuse)
7868                                 any_iuse = list(any_iuse)
7869                                 any_iuse.sort()
7870                                 for flag in any_iuse:
7871                                         flag_str = None
7872                                         isEnabled = False
7873                                         reinst_flag = reinst_flags and flag in reinst_flags
7874                                         if flag in enabled_flags:
7875                                                 isEnabled = True
7876                                                 if is_new or flag in old_use and \
7877                                                         (all_flags or reinst_flag):
7878                                                         flag_str = red(flag)
7879                                                 elif flag not in old_iuse:
7880                                                         flag_str = yellow(flag) + "%*"
7881                                                 elif flag not in old_use:
7882                                                         flag_str = green(flag) + "*"
7883                                         elif flag in removed_iuse:
7884                                                 if all_flags or reinst_flag:
7885                                                         flag_str = yellow("-" + flag) + "%"
7886                                                         if flag in old_use:
7887                                                                 flag_str += "*"
7888                                                         flag_str = "(" + flag_str + ")"
7889                                                         removed.append(flag_str)
7890                                                 continue
7891                                         else:
7892                                                 if is_new or flag in old_iuse and \
7893                                                         flag not in old_use and \
7894                                                         (all_flags or reinst_flag):
7895                                                         flag_str = blue("-" + flag)
7896                                                 elif flag not in old_iuse:
7897                                                         flag_str = yellow("-" + flag)
7898                                                         if flag not in iuse_forced:
7899                                                                 flag_str += "%"
7900                                                 elif flag in old_use:
7901                                                         flag_str = green("-" + flag) + "*"
7902                                         if flag_str:
7903                                                 if flag in iuse_forced:
7904                                                         flag_str = "(" + flag_str + ")"
7905                                                 if isEnabled:
7906                                                         enabled.append(flag_str)
7907                                                 else:
7908                                                         disabled.append(flag_str)
7909
7910                                 if alphabetical:
7911                                         ret = " ".join(enabled)
7912                                 else:
7913                                         ret = " ".join(enabled + disabled + removed)
7914                                 if ret:
7915                                         ret = '%s="%s" ' % (name, ret)
7916                                 return ret
7917
7918                 repo_display = RepoDisplay(self.roots)
7919
7920                 tree_nodes = []
7921                 display_list = []
7922                 mygraph = self.digraph.copy()
7923
7924                 # If there are any Uninstall instances, add the corresponding
7925                 # blockers to the digraph (useful for --tree display).
7926
7927                 executed_uninstalls = set(node for node in mylist \
7928                         if isinstance(node, Package) and node.operation == "unmerge")
7929
7930                 for uninstall in self._blocker_uninstalls.leaf_nodes():
7931                         uninstall_parents = \
7932                                 self._blocker_uninstalls.parent_nodes(uninstall)
7933                         if not uninstall_parents:
7934                                 continue
7935
7936                         # Remove the corresponding "nomerge" node and substitute
7937                         # the Uninstall node.
7938                         inst_pkg = self._pkg_cache[
7939                                 ("installed", uninstall.root, uninstall.cpv, "nomerge")]
7940                         try:
7941                                 mygraph.remove(inst_pkg)
7942                         except KeyError:
7943                                 pass
7944
7945                         try:
7946                                 inst_pkg_blockers = self._blocker_parents.child_nodes(inst_pkg)
7947                         except KeyError:
7948                                 inst_pkg_blockers = []
7949
7950                         # Break the Package -> Uninstall edges.
7951                         mygraph.remove(uninstall)
7952
7953                         # Resolution of a package's blockers
7954                         # depend on it's own uninstallation.
7955                         for blocker in inst_pkg_blockers:
7956                                 mygraph.add(uninstall, blocker)
7957
7958                         # Expand Package -> Uninstall edges into
7959                         # Package -> Blocker -> Uninstall edges.
7960                         for blocker in uninstall_parents:
7961                                 mygraph.add(uninstall, blocker)
7962                                 for parent in self._blocker_parents.parent_nodes(blocker):
7963                                         if parent != inst_pkg:
7964                                                 mygraph.add(blocker, parent)
7965
7966                         # If the uninstall task did not need to be executed because
7967                         # of an upgrade, display Blocker -> Upgrade edges since the
7968                         # corresponding Blocker -> Uninstall edges will not be shown.
7969                         upgrade_node = \
7970                                 self._slot_pkg_map[uninstall.root].get(uninstall.slot_atom)
7971                         if upgrade_node is not None and \
7972                                 uninstall not in executed_uninstalls:
7973                                 for blocker in uninstall_parents:
7974                                         mygraph.add(upgrade_node, blocker)
7975
7976                 unsatisfied_blockers = []
7977                 i = 0
7978                 depth = 0
7979                 shown_edges = set()
7980                 for x in mylist:
7981                         if isinstance(x, Blocker) and not x.satisfied:
7982                                 unsatisfied_blockers.append(x)
7983                                 continue
7984                         graph_key = x
7985                         if "--tree" in self.myopts:
7986                                 depth = len(tree_nodes)
7987                                 while depth and graph_key not in \
7988                                         mygraph.child_nodes(tree_nodes[depth-1]):
7989                                                 depth -= 1
7990                                 if depth:
7991                                         tree_nodes = tree_nodes[:depth]
7992                                         tree_nodes.append(graph_key)
7993                                         display_list.append((x, depth, True))
7994                                         shown_edges.add((graph_key, tree_nodes[depth-1]))
7995                                 else:
7996                                         traversed_nodes = set() # prevent endless circles
7997                                         traversed_nodes.add(graph_key)
7998                                         def add_parents(current_node, ordered):
7999                                                 parent_nodes = None
8000                                                 # Do not traverse to parents if this node is an
8001                                                 # an argument or a direct member of a set that has
8002                                                 # been specified as an argument (system or world).
8003                                                 if current_node not in self._set_nodes:
8004                                                         parent_nodes = mygraph.parent_nodes(current_node)
8005                                                 if parent_nodes:
8006                                                         child_nodes = set(mygraph.child_nodes(current_node))
8007                                                         selected_parent = None
8008                                                         # First, try to avoid a direct cycle.
8009                                                         for node in parent_nodes:
8010                                                                 if not isinstance(node, (Blocker, Package)):
8011                                                                         continue
8012                                                                 if node not in traversed_nodes and \
8013                                                                         node not in child_nodes:
8014                                                                         edge = (current_node, node)
8015                                                                         if edge in shown_edges:
8016                                                                                 continue
8017                                                                         selected_parent = node
8018                                                                         break
8019                                                         if not selected_parent:
8020                                                                 # A direct cycle is unavoidable.
8021                                                                 for node in parent_nodes:
8022                                                                         if not isinstance(node, (Blocker, Package)):
8023                                                                                 continue
8024                                                                         if node not in traversed_nodes:
8025                                                                                 edge = (current_node, node)
8026                                                                                 if edge in shown_edges:
8027                                                                                         continue
8028                                                                                 selected_parent = node
8029                                                                                 break
8030                                                         if selected_parent:
8031                                                                 shown_edges.add((current_node, selected_parent))
8032                                                                 traversed_nodes.add(selected_parent)
8033                                                                 add_parents(selected_parent, False)
8034                                                 display_list.append((current_node,
8035                                                         len(tree_nodes), ordered))
8036                                                 tree_nodes.append(current_node)
8037                                         tree_nodes = []
8038                                         add_parents(graph_key, True)
8039                         else:
8040                                 display_list.append((x, depth, True))
8041                 mylist = display_list
8042                 for x in unsatisfied_blockers:
8043                         mylist.append((x, 0, True))
8044
8045                 last_merge_depth = 0
8046                 for i in xrange(len(mylist)-1,-1,-1):
8047                         graph_key, depth, ordered = mylist[i]
8048                         if not ordered and depth == 0 and i > 0 \
8049                                 and graph_key == mylist[i-1][0] and \
8050                                 mylist[i-1][1] == 0:
8051                                 # An ordered node got a consecutive duplicate when the tree was
8052                                 # being filled in.
8053                                 del mylist[i]
8054                                 continue
8055                         if ordered and graph_key[-1] != "nomerge":
8056                                 last_merge_depth = depth
8057                                 continue
8058                         if depth >= last_merge_depth or \
8059                                 i < len(mylist) - 1 and \
8060                                 depth >= mylist[i+1][1]:
8061                                         del mylist[i]
8062
8063                 from portage import flatten
8064                 from portage.dep import use_reduce, paren_reduce
8065                 # files to fetch list - avoids counting a same file twice
8066                 # in size display (verbose mode)
8067                 myfetchlist=[]
8068
8069                 # Use this set to detect when all the "repoadd" strings are "[0]"
8070                 # and disable the entire repo display in this case.
8071                 repoadd_set = set()
8072
8073                 for mylist_index in xrange(len(mylist)):
8074                         x, depth, ordered = mylist[mylist_index]
8075                         pkg_type = x[0]
8076                         myroot = x[1]
8077                         pkg_key = x[2]
8078                         portdb = self.trees[myroot]["porttree"].dbapi
8079                         bindb  = self.trees[myroot]["bintree"].dbapi
8080                         vardb = self.trees[myroot]["vartree"].dbapi
8081                         vartree = self.trees[myroot]["vartree"]
8082                         pkgsettings = self.pkgsettings[myroot]
8083
8084                         fetch=" "
8085                         indent = " " * depth
8086
8087                         if isinstance(x, Blocker):
8088                                 if x.satisfied:
8089                                         blocker_style = "PKG_BLOCKER_SATISFIED"
8090                                         addl = "%s  %s  " % (colorize(blocker_style, "b"), fetch)
8091                                 else:
8092                                         blocker_style = "PKG_BLOCKER"
8093                                         addl = "%s  %s  " % (colorize(blocker_style, "B"), fetch)
8094                                 if ordered:
8095                                         counters.blocks += 1
8096                                         if x.satisfied:
8097                                                 counters.blocks_satisfied += 1
8098                                 resolved = portage.key_expand(
8099                                         str(x.atom).lstrip("!"), mydb=vardb, settings=pkgsettings)
8100                                 if "--columns" in self.myopts and "--quiet" in self.myopts:
8101                                         addl += " " + colorize(blocker_style, resolved)
8102                                 else:
8103                                         addl = "[%s %s] %s%s" % \
8104                                                 (colorize(blocker_style, "blocks"),
8105                                                 addl, indent, colorize(blocker_style, resolved))
8106                                 block_parents = self._blocker_parents.parent_nodes(x)
8107                                 block_parents = set([pnode[2] for pnode in block_parents])
8108                                 block_parents = ", ".join(block_parents)
8109                                 if resolved!=x[2]:
8110                                         addl += colorize(blocker_style,
8111                                                 " (\"%s\" is blocking %s)") % \
8112                                                 (str(x.atom).lstrip("!"), block_parents)
8113                                 else:
8114                                         addl += colorize(blocker_style,
8115                                                 " (is blocking %s)") % block_parents
8116                                 if isinstance(x, Blocker) and x.satisfied:
8117                                         if columns:
8118                                                 continue
8119                                         p.append(addl)
8120                                 else:
8121                                         blockers.append(addl)
8122                         else:
8123                                 pkg_status = x[3]
8124                                 pkg_merge = ordered and pkg_status == "merge"
8125                                 if not pkg_merge and pkg_status == "merge":
8126                                         pkg_status = "nomerge"
8127                                 built = pkg_type != "ebuild"
8128                                 installed = pkg_type == "installed"
8129                                 pkg = x
8130                                 metadata = pkg.metadata
8131                                 ebuild_path = None
8132                                 repo_name = metadata["repository"]
8133                                 if pkg_type == "ebuild":
8134                                         ebuild_path = portdb.findname(pkg_key)
8135                                         if not ebuild_path: # shouldn't happen
8136                                                 raise portage.exception.PackageNotFound(pkg_key)
8137                                         repo_path_real = os.path.dirname(os.path.dirname(
8138                                                 os.path.dirname(ebuild_path)))
8139                                 else:
8140                                         repo_path_real = portdb.getRepositoryPath(repo_name)
8141                                 pkg_use = list(pkg.use.enabled)
8142                                 try:
8143                                         restrict = flatten(use_reduce(paren_reduce(
8144                                                 pkg.metadata["RESTRICT"]), uselist=pkg_use))
8145                                 except portage.exception.InvalidDependString, e:
8146                                         if not pkg.installed:
8147                                                 show_invalid_depstring_notice(x,
8148                                                         pkg.metadata["RESTRICT"], str(e))
8149                                                 del e
8150                                                 return 1
8151                                         restrict = []
8152                                 if "ebuild" == pkg_type and x[3] != "nomerge" and \
8153                                         "fetch" in restrict:
8154                                         fetch = red("F")
8155                                         if ordered:
8156                                                 counters.restrict_fetch += 1
8157                                         if portdb.fetch_check(pkg_key, pkg_use):
8158                                                 fetch = green("f")
8159                                                 if ordered:
8160                                                         counters.restrict_fetch_satisfied += 1
8161
8162                                 #we need to use "--emptrytree" testing here rather than "empty" param testing because "empty"
8163                                 #param is used for -u, where you still *do* want to see when something is being upgraded.
8164                                 myoldbest = []
8165                                 myinslotlist = None
8166                                 installed_versions = vardb.match(portage.cpv_getkey(pkg_key))
8167                                 if vardb.cpv_exists(pkg_key):
8168                                         addl="  "+yellow("R")+fetch+"  "
8169                                         if ordered:
8170                                                 if pkg_merge:
8171                                                         counters.reinst += 1
8172                                                 elif pkg_status == "uninstall":
8173                                                         counters.uninst += 1
8174                                 # filter out old-style virtual matches
8175                                 elif installed_versions and \
8176                                         portage.cpv_getkey(installed_versions[0]) == \
8177                                         portage.cpv_getkey(pkg_key):
8178                                         myinslotlist = vardb.match(pkg.slot_atom)
8179                                         # If this is the first install of a new-style virtual, we
8180                                         # need to filter out old-style virtual matches.
8181                                         if myinslotlist and \
8182                                                 portage.cpv_getkey(myinslotlist[0]) != \
8183                                                 portage.cpv_getkey(pkg_key):
8184                                                 myinslotlist = None
8185                                         if myinslotlist:
8186                                                 myoldbest = myinslotlist[:]
8187                                                 addl = "   " + fetch
8188                                                 if not portage.dep.cpvequal(pkg_key,
8189                                                         portage.best([pkg_key] + myoldbest)):
8190                                                         # Downgrade in slot
8191                                                         addl += turquoise("U")+blue("D")
8192                                                         if ordered:
8193                                                                 counters.downgrades += 1
8194                                                 else:
8195                                                         # Update in slot
8196                                                         addl += turquoise("U") + " "
8197                                                         if ordered:
8198                                                                 counters.upgrades += 1
8199                                         else:
8200                                                 # New slot, mark it new.
8201                                                 addl = " " + green("NS") + fetch + "  "
8202                                                 myoldbest = vardb.match(portage.cpv_getkey(pkg_key))
8203                                                 if ordered:
8204                                                         counters.newslot += 1
8205
8206                                         if "--changelog" in self.myopts:
8207                                                 inst_matches = vardb.match(pkg.slot_atom)
8208                                                 if inst_matches:
8209                                                         changelogs.extend(self.calc_changelog(
8210                                                                 portdb.findname(pkg_key),
8211                                                                 inst_matches[0], pkg_key))
8212                                 else:
8213                                         addl = " " + green("N") + " " + fetch + "  "
8214                                         if ordered:
8215                                                 counters.new += 1
8216
8217                                 verboseadd = ""
8218                                 repoadd = None
8219
8220                                 if True:
8221                                         # USE flag display
8222                                         forced_flags = set()
8223                                         pkgsettings.setcpv(pkg) # for package.use.{mask,force}
8224                                         forced_flags.update(pkgsettings.useforce)
8225                                         forced_flags.update(pkgsettings.usemask)
8226
8227                                         cur_use = [flag for flag in pkg.use.enabled \
8228                                                 if flag in pkg.iuse.all]
8229                                         cur_iuse = sorted(pkg.iuse.all)
8230
8231                                         if myoldbest and myinslotlist:
8232                                                 previous_cpv = myoldbest[0]
8233                                         else:
8234                                                 previous_cpv = pkg.cpv
8235                                         if vardb.cpv_exists(previous_cpv):
8236                                                 old_iuse, old_use = vardb.aux_get(
8237                                                                 previous_cpv, ["IUSE", "USE"])
8238                                                 old_iuse = list(set(
8239                                                         filter_iuse_defaults(old_iuse.split())))
8240                                                 old_iuse.sort()
8241                                                 old_use = old_use.split()
8242                                                 is_new = False
8243                                         else:
8244                                                 old_iuse = []
8245                                                 old_use = []
8246                                                 is_new = True
8247
8248                                         old_use = [flag for flag in old_use if flag in old_iuse]
8249
8250                                         use_expand = pkgsettings["USE_EXPAND"].lower().split()
8251                                         use_expand.sort()
8252                                         use_expand.reverse()
8253                                         use_expand_hidden = \
8254                                                 pkgsettings["USE_EXPAND_HIDDEN"].lower().split()
8255
8256                                         def map_to_use_expand(myvals, forcedFlags=False,
8257                                                 removeHidden=True):
8258                                                 ret = {}
8259                                                 forced = {}
8260                                                 for exp in use_expand:
8261                                                         ret[exp] = []
8262                                                         forced[exp] = set()
8263                                                         for val in myvals[:]:
8264                                                                 if val.startswith(exp.lower()+"_"):
8265                                                                         if val in forced_flags:
8266                                                                                 forced[exp].add(val[len(exp)+1:])
8267                                                                         ret[exp].append(val[len(exp)+1:])
8268                                                                         myvals.remove(val)
8269                                                 ret["USE"] = myvals
8270                                                 forced["USE"] = [val for val in myvals \
8271                                                         if val in forced_flags]
8272                                                 if removeHidden:
8273                                                         for exp in use_expand_hidden:
8274                                                                 ret.pop(exp, None)
8275                                                 if forcedFlags:
8276                                                         return ret, forced
8277                                                 return ret
8278
8279                                         # Prevent USE_EXPAND_HIDDEN flags from being hidden if they
8280                                         # are the only thing that triggered reinstallation.
8281                                         reinst_flags_map = {}
8282                                         reinstall_for_flags = self._reinstall_nodes.get(pkg)
8283                                         reinst_expand_map = None
8284                                         if reinstall_for_flags:
8285                                                 reinst_flags_map = map_to_use_expand(
8286                                                         list(reinstall_for_flags), removeHidden=False)
8287                                                 for k in list(reinst_flags_map):
8288                                                         if not reinst_flags_map[k]:
8289                                                                 del reinst_flags_map[k]
8290                                                 if not reinst_flags_map.get("USE"):
8291                                                         reinst_expand_map = reinst_flags_map.copy()
8292                                                         reinst_expand_map.pop("USE", None)
8293                                         if reinst_expand_map and \
8294                                                 not set(reinst_expand_map).difference(
8295                                                 use_expand_hidden):
8296                                                 use_expand_hidden = \
8297                                                         set(use_expand_hidden).difference(
8298                                                         reinst_expand_map)
8299
8300                                         cur_iuse_map, iuse_forced = \
8301                                                 map_to_use_expand(cur_iuse, forcedFlags=True)
8302                                         cur_use_map = map_to_use_expand(cur_use)
8303                                         old_iuse_map = map_to_use_expand(old_iuse)
8304                                         old_use_map = map_to_use_expand(old_use)
8305
8306                                         use_expand.sort()
8307                                         use_expand.insert(0, "USE")
8308                                         
8309                                         for key in use_expand:
8310                                                 if key in use_expand_hidden:
8311                                                         continue
8312                                                 verboseadd += create_use_string(key.upper(),
8313                                                         cur_iuse_map[key], iuse_forced[key],
8314                                                         cur_use_map[key], old_iuse_map[key],
8315                                                         old_use_map[key], is_new,
8316                                                         reinst_flags_map.get(key))
8317
8318                                 if verbosity == 3:
8319                                         # size verbose
8320                                         mysize=0
8321                                         if pkg_type == "ebuild" and pkg_merge:
8322                                                 try:
8323                                                         myfilesdict = portdb.getfetchsizes(pkg_key,
8324                                                                 useflags=pkg_use, debug=self.edebug)
8325                                                 except portage.exception.InvalidDependString, e:
8326                                                         src_uri = portdb.aux_get(pkg_key, ["SRC_URI"])[0]
8327                                                         show_invalid_depstring_notice(x, src_uri, str(e))
8328                                                         del e
8329                                                         return 1
8330                                                 if myfilesdict is None:
8331                                                         myfilesdict="[empty/missing/bad digest]"
8332                                                 else:
8333                                                         for myfetchfile in myfilesdict:
8334                                                                 if myfetchfile not in myfetchlist:
8335                                                                         mysize+=myfilesdict[myfetchfile]
8336                                                                         myfetchlist.append(myfetchfile)
8337                                                         if ordered:
8338                                                                 counters.totalsize += mysize
8339                                                 verboseadd += format_size(mysize)
8340
8341                                         # overlay verbose
8342                                         # assign index for a previous version in the same slot
8343                                         has_previous = False
8344                                         repo_name_prev = None
8345                                         slot_atom = "%s:%s" % (portage.dep_getkey(pkg_key),
8346                                                 metadata["SLOT"])
8347                                         slot_matches = vardb.match(slot_atom)
8348                                         if slot_matches:
8349                                                 has_previous = True
8350                                                 repo_name_prev = vardb.aux_get(slot_matches[0],
8351                                                         ["repository"])[0]
8352
8353                                         # now use the data to generate output
8354                                         if pkg.installed or not has_previous:
8355                                                 repoadd = repo_display.repoStr(repo_path_real)
8356                                         else:
8357                                                 repo_path_prev = None
8358                                                 if repo_name_prev:
8359                                                         repo_path_prev = portdb.getRepositoryPath(
8360                                                                 repo_name_prev)
8361                                                 if repo_path_prev == repo_path_real:
8362                                                         repoadd = repo_display.repoStr(repo_path_real)
8363                                                 else:
8364                                                         repoadd = "%s=>%s" % (
8365                                                                 repo_display.repoStr(repo_path_prev),
8366                                                                 repo_display.repoStr(repo_path_real))
8367                                         if repoadd:
8368                                                 repoadd_set.add(repoadd)
8369
8370                                 xs = [portage.cpv_getkey(pkg_key)] + \
8371                                         list(portage.catpkgsplit(pkg_key)[2:])
8372                                 if xs[2] == "r0":
8373                                         xs[2] = ""
8374                                 else:
8375                                         xs[2] = "-" + xs[2]
8376
8377                                 mywidth = 130
8378                                 if "COLUMNWIDTH" in self.settings:
8379                                         try:
8380                                                 mywidth = int(self.settings["COLUMNWIDTH"])
8381                                         except ValueError, e:
8382                                                 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
8383                                                 portage.writemsg(
8384                                                         "!!! Unable to parse COLUMNWIDTH='%s'\n" % \
8385                                                         self.settings["COLUMNWIDTH"], noiselevel=-1)
8386                                                 del e
8387                                 oldlp = mywidth - 30
8388                                 newlp = oldlp - 30
8389
8390                                 # Convert myoldbest from a list to a string.
8391                                 if not myoldbest:
8392                                         myoldbest = ""
8393                                 else:
8394                                         for pos, key in enumerate(myoldbest):
8395                                                 key = portage.catpkgsplit(key)[2] + \
8396                                                         "-" + portage.catpkgsplit(key)[3]
8397                                                 if key[-3:] == "-r0":
8398                                                         key = key[:-3]
8399                                                 myoldbest[pos] = key
8400                                         myoldbest = blue("["+", ".join(myoldbest)+"]")
8401
8402                                 pkg_cp = xs[0]
8403                                 root_config = self.roots[myroot]
8404                                 system_set = root_config.sets["system"]
8405                                 world_set  = root_config.sets["world"]
8406
8407                                 pkg_system = False
8408                                 pkg_world = False
8409                                 try:
8410                                         pkg_system = system_set.findAtomForPackage(pkg)
8411                                         pkg_world  = world_set.findAtomForPackage(pkg)
8412                                         if not (oneshot or pkg_world) and \
8413                                                 myroot == self.target_root and \
8414                                                 favorites_set.findAtomForPackage(pkg):
8415                                                 # Maybe it will be added to world now.
8416                                                 if create_world_atom(pkg, favorites_set, root_config):
8417                                                         pkg_world = True
8418                                 except portage.exception.InvalidDependString:
8419                                         # This is reported elsewhere if relevant.
8420                                         pass
8421
8422                                 def pkgprint(pkg_str):
8423                                         if pkg_merge:
8424                                                 if pkg_system:
8425                                                         return colorize("PKG_MERGE_SYSTEM", pkg_str)
8426                                                 elif pkg_world:
8427                                                         return colorize("PKG_MERGE_WORLD", pkg_str)
8428                                                 else:
8429                                                         return colorize("PKG_MERGE", pkg_str)
8430                                         elif pkg_status == "uninstall":
8431                                                 return colorize("PKG_UNINSTALL", pkg_str)
8432                                         else:
8433                                                 if pkg_system:
8434                                                         return colorize("PKG_NOMERGE_SYSTEM", pkg_str)
8435                                                 elif pkg_world:
8436                                                         return colorize("PKG_NOMERGE_WORLD", pkg_str)
8437                                                 else:
8438                                                         return colorize("PKG_NOMERGE", pkg_str)
8439
8440                                 try:
8441                                         properties = flatten(use_reduce(paren_reduce(
8442                                                 pkg.metadata["PROPERTIES"]), uselist=pkg.use.enabled))
8443                                 except portage.exception.InvalidDependString, e:
8444                                         if not pkg.installed:
8445                                                 show_invalid_depstring_notice(pkg,
8446                                                         pkg.metadata["PROPERTIES"], str(e))
8447                                                 del e
8448                                                 return 1
8449                                         properties = []
8450                                 interactive = "interactive" in properties
8451                                 if interactive and pkg.operation == "merge":
8452                                         addl = colorize("WARN", "I") + addl[1:]
8453                                         if ordered:
8454                                                 counters.interactive += 1
8455
8456                                 if x[1]!="/":
8457                                         if myoldbest:
8458                                                 myoldbest +=" "
8459                                         if "--columns" in self.myopts:
8460                                                 if "--quiet" in self.myopts:
8461                                                         myprint=addl+" "+indent+pkgprint(pkg_cp)
8462                                                         myprint=myprint+darkblue(" "+xs[1]+xs[2])+" "
8463                                                         myprint=myprint+myoldbest
8464                                                         myprint=myprint+darkgreen("to "+x[1])
8465                                                         verboseadd = None
8466                                                 else:
8467                                                         if not pkg_merge:
8468                                                                 myprint = "[%s] %s%s" % \
8469                                                                         (pkgprint(pkg_status.ljust(13)),
8470                                                                         indent, pkgprint(pkg.cp))
8471                                                         else:
8472                                                                 myprint = "[%s %s] %s%s" % \
8473                                                                         (pkgprint(pkg.type_name), addl,
8474                                                                         indent, pkgprint(pkg.cp))
8475                                                         if (newlp-nc_len(myprint)) > 0:
8476                                                                 myprint=myprint+(" "*(newlp-nc_len(myprint)))
8477                                                         myprint=myprint+"["+darkblue(xs[1]+xs[2])+"] "
8478                                                         if (oldlp-nc_len(myprint)) > 0:
8479                                                                 myprint=myprint+" "*(oldlp-nc_len(myprint))
8480                                                         myprint=myprint+myoldbest
8481                                                         myprint += darkgreen("to " + pkg.root)
8482                                         else:
8483                                                 if not pkg_merge:
8484                                                         myprint = "[%s] " % pkgprint(pkg_status.ljust(13))
8485                                                 else:
8486                                                         myprint = "[%s %s] " % (pkgprint(pkg_type), addl)
8487                                                 myprint += indent + pkgprint(pkg_key) + " " + \
8488                                                         myoldbest + darkgreen("to " + myroot)
8489                                 else:
8490                                         if "--columns" in self.myopts:
8491                                                 if "--quiet" in self.myopts:
8492                                                         myprint=addl+" "+indent+pkgprint(pkg_cp)
8493                                                         myprint=myprint+" "+green(xs[1]+xs[2])+" "
8494                                                         myprint=myprint+myoldbest
8495                                                         verboseadd = None
8496                                                 else:
8497                                                         if not pkg_merge:
8498                                                                 myprint = "[%s] %s%s" % \
8499                                                                         (pkgprint(pkg_status.ljust(13)),
8500                                                                         indent, pkgprint(pkg.cp))
8501                                                         else:
8502                                                                 myprint = "[%s %s] %s%s" % \
8503                                                                         (pkgprint(pkg.type_name), addl,
8504                                                                         indent, pkgprint(pkg.cp))
8505                                                         if (newlp-nc_len(myprint)) > 0:
8506                                                                 myprint=myprint+(" "*(newlp-nc_len(myprint)))
8507                                                         myprint=myprint+green(" ["+xs[1]+xs[2]+"] ")
8508                                                         if (oldlp-nc_len(myprint)) > 0:
8509                                                                 myprint=myprint+(" "*(oldlp-nc_len(myprint)))
8510                                                         myprint += myoldbest
8511                                         else:
8512                                                 if not pkg_merge:
8513                                                         myprint = "[%s] %s%s %s" % \
8514                                                                 (pkgprint(pkg_status.ljust(13)),
8515                                                                 indent, pkgprint(pkg.cpv),
8516                                                                 myoldbest)
8517                                                 else:
8518                                                         myprint = "[%s %s] %s%s %s" % \
8519                                                                 (pkgprint(pkg_type), addl, indent,
8520                                                                 pkgprint(pkg.cpv), myoldbest)
8521
8522                                 if columns and pkg.operation == "uninstall":
8523                                         continue
8524                                 p.append((myprint, verboseadd, repoadd))
8525
8526                                 if "--tree" not in self.myopts and \
8527                                         "--quiet" not in self.myopts and \
8528                                         not self._opts_no_restart.intersection(self.myopts) and \
8529                                         pkg.root == self._running_root.root and \
8530                                         portage.match_from_list(
8531                                         portage.const.PORTAGE_PACKAGE_ATOM, [pkg]) and \
8532                                         not vardb.cpv_exists(pkg.cpv) and \
8533                                         "--quiet" not in self.myopts:
8534                                                 if mylist_index < len(mylist) - 1:
8535                                                         p.append(colorize("WARN", "*** Portage will stop merging at this point and reload itself,"))
8536                                                         p.append(colorize("WARN", "    then resume the merge."))
8537
8538                 out = sys.stdout
8539                 show_repos = repoadd_set and repoadd_set != set(["0"])
8540
8541                 for x in p:
8542                         if isinstance(x, basestring):
8543                                 out.write("%s\n" % (x,))
8544                                 continue
8545
8546                         myprint, verboseadd, repoadd = x
8547
8548                         if verboseadd:
8549                                 myprint += " " + verboseadd
8550
8551                         if show_repos and repoadd:
8552                                 myprint += " " + teal("[%s]" % repoadd)
8553
8554                         out.write("%s\n" % (myprint,))
8555
8556                 for x in blockers:
8557                         print x
8558
8559                 if verbosity == 3:
8560                         print
8561                         print counters
8562                         if show_repos:
8563                                 sys.stdout.write(str(repo_display))
8564
8565                 if "--changelog" in self.myopts:
8566                         print
8567                         for revision,text in changelogs:
8568                                 print bold('*'+revision)
8569                                 sys.stdout.write(text)
8570
8571                 sys.stdout.flush()
8572                 return os.EX_OK
8573
8574         def display_problems(self):
8575                 """
8576                 Display problems with the dependency graph such as slot collisions.
8577                 This is called internally by display() to show the problems _after_
8578                 the merge list where it is most likely to be seen, but if display()
8579                 is not going to be called then this method should be called explicitly
8580                 to ensure that the user is notified of problems with the graph.
8581
8582                 All output goes to stderr, except for unsatisfied dependencies which
8583                 go to stdout for parsing by programs such as autounmask.
8584                 """
8585
8586                 # Note that show_masked_packages() sends it's output to
8587                 # stdout, and some programs such as autounmask parse the
8588                 # output in cases when emerge bails out. However, when
8589                 # show_masked_packages() is called for installed packages
8590                 # here, the message is a warning that is more appropriate
8591                 # to send to stderr, so temporarily redirect stdout to
8592                 # stderr. TODO: Fix output code so there's a cleaner way
8593                 # to redirect everything to stderr.
8594                 sys.stdout.flush()
8595                 sys.stderr.flush()
8596                 stdout = sys.stdout
8597                 try:
8598                         sys.stdout = sys.stderr
8599                         self._display_problems()
8600                 finally:
8601                         sys.stdout = stdout
8602                         sys.stdout.flush()
8603                         sys.stderr.flush()
8604
8605                 # This goes to stdout for parsing by programs like autounmask.
8606                 for pargs, kwargs in self._unsatisfied_deps_for_display:
8607                         self._show_unsatisfied_dep(*pargs, **kwargs)
8608
8609         def _display_problems(self):
8610                 if self._circular_deps_for_display is not None:
8611                         self._show_circular_deps(
8612                                 self._circular_deps_for_display)
8613
8614                 # The user is only notified of a slot conflict if
8615                 # there are no unresolvable blocker conflicts.
8616                 if self._unsatisfied_blockers_for_display is not None:
8617                         self._show_unsatisfied_blockers(
8618                                 self._unsatisfied_blockers_for_display)
8619                 else:
8620                         self._show_slot_collision_notice()
8621
8622                 # TODO: Add generic support for "set problem" handlers so that
8623                 # the below warnings aren't special cases for world only.
8624
8625                 if self._missing_args:
8626                         world_problems = False
8627                         if "world" in self._sets:
8628                                 # Filter out indirect members of world (from nested sets)
8629                                 # since only direct members of world are desired here.
8630                                 world_set = self.roots[self.target_root].sets["world"]
8631                                 for arg, atom in self._missing_args:
8632                                         if arg.name == "world" and atom in world_set:
8633                                                 world_problems = True
8634                                                 break
8635
8636                         if world_problems:
8637                                 sys.stderr.write("\n!!! Problems have been " + \
8638                                         "detected with your world file\n")
8639                                 sys.stderr.write("!!! Please run " + \
8640                                         green("emaint --check world")+"\n\n")
8641
8642                 if self._missing_args:
8643                         sys.stderr.write("\n" + colorize("BAD", "!!!") + \
8644                                 " Ebuilds for the following packages are either all\n")
8645                         sys.stderr.write(colorize("BAD", "!!!") + \
8646                                 " masked or don't exist:\n")
8647                         sys.stderr.write(" ".join(str(atom) for arg, atom in \
8648                                 self._missing_args) + "\n")
8649
8650                 if self._pprovided_args:
8651                         arg_refs = {}
8652                         for arg, atom in self._pprovided_args:
8653                                 if isinstance(arg, SetArg):
8654                                         parent = arg.name
8655                                         arg_atom = (atom, atom)
8656                                 else:
8657                                         parent = "args"
8658                                         arg_atom = (arg.arg, atom)
8659                                 refs = arg_refs.setdefault(arg_atom, [])
8660                                 if parent not in refs:
8661                                         refs.append(parent)
8662                         msg = []
8663                         msg.append(bad("\nWARNING: "))
8664                         if len(self._pprovided_args) > 1:
8665                                 msg.append("Requested packages will not be " + \
8666                                         "merged because they are listed in\n")
8667                         else:
8668                                 msg.append("A requested package will not be " + \
8669                                         "merged because it is listed in\n")
8670                         msg.append("package.provided:\n\n")
8671                         problems_sets = set()
8672                         for (arg, atom), refs in arg_refs.iteritems():
8673                                 ref_string = ""
8674                                 if refs:
8675                                         problems_sets.update(refs)
8676                                         refs.sort()
8677                                         ref_string = ", ".join(["'%s'" % name for name in refs])
8678                                         ref_string = " pulled in by " + ref_string
8679                                 msg.append("  %s%s\n" % (colorize("INFORM", str(arg)), ref_string))
8680                         msg.append("\n")
8681                         if "world" in problems_sets:
8682                                 msg.append("This problem can be solved in one of the following ways:\n\n")
8683                                 msg.append("  A) Use emaint to clean offending packages from world (if not installed).\n")
8684                                 msg.append("  B) Uninstall offending packages (cleans them from world).\n")
8685                                 msg.append("  C) Remove offending entries from package.provided.\n\n")
8686                                 msg.append("The best course of action depends on the reason that an offending\n")
8687                                 msg.append("package.provided entry exists.\n\n")
8688                         sys.stderr.write("".join(msg))
8689
8690                 masked_packages = []
8691                 for pkg in self._masked_installed:
8692                         root_config = pkg.root_config
8693                         pkgsettings = self.pkgsettings[pkg.root]
8694                         mreasons = get_masking_status(pkg, pkgsettings, root_config)
8695                         masked_packages.append((root_config, pkgsettings,
8696                                 pkg.cpv, pkg.metadata, mreasons))
8697                 if masked_packages:
8698                         sys.stderr.write("\n" + colorize("BAD", "!!!") + \
8699                                 " The following installed packages are masked:\n")
8700                         show_masked_packages(masked_packages)
8701                         show_mask_docs()
8702                         print
8703
8704         def calc_changelog(self,ebuildpath,current,next):
8705                 if ebuildpath == None or not os.path.exists(ebuildpath):
8706                         return []
8707                 current = '-'.join(portage.catpkgsplit(current)[1:])
8708                 if current.endswith('-r0'):
8709                         current = current[:-3]
8710                 next = '-'.join(portage.catpkgsplit(next)[1:])
8711                 if next.endswith('-r0'):
8712                         next = next[:-3]
8713                 changelogpath = os.path.join(os.path.split(ebuildpath)[0],'ChangeLog')
8714                 try:
8715                         changelog = open(changelogpath).read()
8716                 except SystemExit, e:
8717                         raise # Needed else can't exit
8718                 except:
8719                         return []
8720                 divisions = self.find_changelog_tags(changelog)
8721                 #print 'XX from',current,'to',next
8722                 #for div,text in divisions: print 'XX',div
8723                 # skip entries for all revisions above the one we are about to emerge
8724                 for i in range(len(divisions)):
8725                         if divisions[i][0]==next:
8726                                 divisions = divisions[i:]
8727                                 break
8728                 # find out how many entries we are going to display
8729                 for i in range(len(divisions)):
8730                         if divisions[i][0]==current:
8731                                 divisions = divisions[:i]
8732                                 break
8733                 else:
8734                     # couldnt find the current revision in the list. display nothing
8735                         return []
8736                 return divisions
8737
8738         def find_changelog_tags(self,changelog):
8739                 divs = []
8740                 release = None
8741                 while 1:
8742                         match = re.search(r'^\*\ ?([-a-zA-Z0-9_.+]*)(?:\ .*)?\n',changelog,re.M)
8743                         if match is None:
8744                                 if release is not None:
8745                                         divs.append((release,changelog))
8746                                 return divs
8747                         if release is not None:
8748                                 divs.append((release,changelog[:match.start()]))
8749                         changelog = changelog[match.end():]
8750                         release = match.group(1)
8751                         if release.endswith('.ebuild'):
8752                                 release = release[:-7]
8753                         if release.endswith('-r0'):
8754                                 release = release[:-3]
8755
8756         def saveNomergeFavorites(self):
8757                 """Find atoms in favorites that are not in the mergelist and add them
8758                 to the world file if necessary."""
8759                 for x in ("--buildpkgonly", "--fetchonly", "--fetch-all-uri",
8760                         "--oneshot", "--onlydeps", "--pretend"):
8761                         if x in self.myopts:
8762                                 return
8763                 root_config = self.roots[self.target_root]
8764                 world_set = root_config.sets["world"]
8765
8766                 world_locked = False
8767                 if hasattr(world_set, "lock"):
8768                         world_set.lock()
8769                         world_locked = True
8770
8771                 if hasattr(world_set, "load"):
8772                         world_set.load() # maybe it's changed on disk
8773
8774                 args_set = self._sets["args"]
8775                 portdb = self.trees[self.target_root]["porttree"].dbapi
8776                 added_favorites = set()
8777                 for x in self._set_nodes:
8778                         pkg_type, root, pkg_key, pkg_status = x
8779                         if pkg_status != "nomerge":
8780                                 continue
8781
8782                         try:
8783                                 myfavkey = create_world_atom(x, args_set, root_config)
8784                                 if myfavkey:
8785                                         if myfavkey in added_favorites:
8786                                                 continue
8787                                         added_favorites.add(myfavkey)
8788                         except portage.exception.InvalidDependString, e:
8789                                 writemsg("\n\n!!! '%s' has invalid PROVIDE: %s\n" % \
8790                                         (pkg_key, str(e)), noiselevel=-1)
8791                                 writemsg("!!! see '%s'\n\n" % os.path.join(
8792                                         root, portage.VDB_PATH, pkg_key, "PROVIDE"), noiselevel=-1)
8793                                 del e
8794                 all_added = []
8795                 for k in self._sets:
8796                         if k in ("args", "world") or not root_config.sets[k].world_candidate:
8797                                 continue
8798                         s = SETPREFIX + k
8799                         if s in world_set:
8800                                 continue
8801                         all_added.append(SETPREFIX + k)
8802                 all_added.extend(added_favorites)
8803                 all_added.sort()
8804                 for a in all_added:
8805                         print ">>> Recording %s in \"world\" favorites file..." % \
8806                                 colorize("INFORM", str(a))
8807                 if all_added:
8808                         world_set.update(all_added)
8809
8810                 if world_locked:
8811                         world_set.unlock()
8812
8813         def loadResumeCommand(self, resume_data, skip_masked=True,
8814                 skip_missing=True):
8815                 """
8816                 Add a resume command to the graph and validate it in the process.  This
8817                 will raise a PackageNotFound exception if a package is not available.
8818                 """
8819
8820                 if not isinstance(resume_data, dict):
8821                         return False
8822
8823                 mergelist = resume_data.get("mergelist")
8824                 if not isinstance(mergelist, list):
8825                         mergelist = []
8826
8827                 fakedb = self.mydbapi
8828                 trees = self.trees
8829                 serialized_tasks = []
8830                 masked_tasks = []
8831                 for x in mergelist:
8832                         if not (isinstance(x, list) and len(x) == 4):
8833                                 continue
8834                         pkg_type, myroot, pkg_key, action = x
8835                         if pkg_type not in self.pkg_tree_map:
8836                                 continue
8837                         if action != "merge":
8838                                 continue
8839                         tree_type = self.pkg_tree_map[pkg_type]
8840                         mydb = trees[myroot][tree_type].dbapi
8841                         db_keys = list(self._trees_orig[myroot][
8842                                 tree_type].dbapi._aux_cache_keys)
8843                         try:
8844                                 metadata = izip(db_keys, mydb.aux_get(pkg_key, db_keys))
8845                         except KeyError:
8846                                 # It does no exist or it is corrupt.
8847                                 if action == "uninstall":
8848                                         continue
8849                                 if skip_missing:
8850                                         # TODO: log these somewhere
8851                                         continue
8852                                 raise portage.exception.PackageNotFound(pkg_key)
8853                         installed = action == "uninstall"
8854                         built = pkg_type != "ebuild"
8855                         root_config = self.roots[myroot]
8856                         pkg = Package(built=built, cpv=pkg_key,
8857                                 installed=installed, metadata=metadata,
8858                                 operation=action, root_config=root_config,
8859                                 type_name=pkg_type)
8860                         if pkg_type == "ebuild":
8861                                 pkgsettings = self.pkgsettings[myroot]
8862                                 pkgsettings.setcpv(pkg)
8863                                 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
8864                                 pkg.metadata['CHOST'] = pkgsettings.get('CHOST', '')
8865                         self._pkg_cache[pkg] = pkg
8866
8867                         root_config = self.roots[pkg.root]
8868                         if "merge" == pkg.operation and \
8869                                 not visible(root_config.settings, pkg):
8870                                 if skip_masked:
8871                                         masked_tasks.append(Dependency(root=pkg.root, parent=pkg))
8872                                 else:
8873                                         self._unsatisfied_deps_for_display.append(
8874                                                 ((pkg.root, "="+pkg.cpv), {"myparent":None}))
8875
8876                         fakedb[myroot].cpv_inject(pkg)
8877                         serialized_tasks.append(pkg)
8878                         self.spinner.update()
8879
8880                 if self._unsatisfied_deps_for_display:
8881                         return False
8882
8883                 if not serialized_tasks or "--nodeps" in self.myopts:
8884                         self._serialized_tasks_cache = serialized_tasks
8885                         self._scheduler_graph = self.digraph
8886                 else:
8887                         self._select_package = self._select_pkg_from_graph
8888                         self.myparams.add("selective")
8889                         # Always traverse deep dependencies in order to account for
8890                         # potentially unsatisfied dependencies of installed packages.
8891                         # This is necessary for correct --keep-going or --resume operation
8892                         # in case a package from a group of circularly dependent packages
8893                         # fails. In this case, a package which has recently been installed
8894                         # may have an unsatisfied circular dependency (pulled in by
8895                         # PDEPEND, for example). So, even though a package is already
8896                         # installed, it may not have all of it's dependencies satisfied, so
8897                         # it may not be usable. If such a package is in the subgraph of
8898                         # deep depenedencies of a scheduled build, that build needs to
8899                         # be cancelled. In order for this type of situation to be
8900                         # recognized, deep traversal of dependencies is required.
8901                         self.myparams.add("deep")
8902
8903                         favorites = resume_data.get("favorites")
8904                         args_set = self._sets["args"]
8905                         if isinstance(favorites, list):
8906                                 args = self._load_favorites(favorites)
8907                         else:
8908                                 args = []
8909
8910                         for task in serialized_tasks:
8911                                 if isinstance(task, Package) and \
8912                                         task.operation == "merge":
8913                                         if not self._add_pkg(task, None):
8914                                                 return False
8915
8916                         # Packages for argument atoms need to be explicitly
8917                         # added via _add_pkg() so that they are included in the
8918                         # digraph (needed at least for --tree display).
8919                         for arg in args:
8920                                 for atom in arg.set:
8921                                         pkg, existing_node = self._select_package(
8922                                                 arg.root_config.root, atom)
8923                                         if existing_node is None and \
8924                                                 pkg is not None:
8925                                                 if not self._add_pkg(pkg, Dependency(atom=atom,
8926                                                         root=pkg.root, parent=arg)):
8927                                                         return False
8928
8929                         # Allow unsatisfied deps here to avoid showing a masking
8930                         # message for an unsatisfied dep that isn't necessarily
8931                         # masked.
8932                         if not self._create_graph(allow_unsatisfied=True):
8933                                 return False
8934
8935                         unsatisfied_deps = []
8936                         for dep in self._unsatisfied_deps:
8937                                 if not isinstance(dep.parent, Package):
8938                                         continue
8939                                 if dep.parent.operation == "merge":
8940                                         unsatisfied_deps.append(dep)
8941                                         continue
8942
8943                                 # For unsatisfied deps of installed packages, only account for
8944                                 # them if they are in the subgraph of dependencies of a package
8945                                 # which is scheduled to be installed.
8946                                 unsatisfied_install = False
8947                                 traversed = set()
8948                                 dep_stack = self.digraph.parent_nodes(dep.parent)
8949                                 while dep_stack:
8950                                         node = dep_stack.pop()
8951                                         if not isinstance(node, Package):
8952                                                 continue
8953                                         if node.operation == "merge":
8954                                                 unsatisfied_install = True
8955                                                 break
8956                                         if node in traversed:
8957                                                 continue
8958                                         traversed.add(node)
8959                                         dep_stack.extend(self.digraph.parent_nodes(node))
8960
8961                                 if unsatisfied_install:
8962                                         unsatisfied_deps.append(dep)
8963
8964                         if masked_tasks or unsatisfied_deps:
8965                                 # This probably means that a required package
8966                                 # was dropped via --skipfirst. It makes the
8967                                 # resume list invalid, so convert it to a
8968                                 # UnsatisfiedResumeDep exception.
8969                                 raise self.UnsatisfiedResumeDep(self,
8970                                         masked_tasks + unsatisfied_deps)
8971                         self._serialized_tasks_cache = None
8972                         try:
8973                                 self.altlist()
8974                         except self._unknown_internal_error:
8975                                 return False
8976
8977                 return True
8978
8979         def _load_favorites(self, favorites):
8980                 """
8981                 Use a list of favorites to resume state from a
8982                 previous select_files() call. This creates similar
8983                 DependencyArg instances to those that would have
8984                 been created by the original select_files() call.
8985                 This allows Package instances to be matched with
8986                 DependencyArg instances during graph creation.
8987                 """
8988                 root_config = self.roots[self.target_root]
8989                 getSetAtoms = root_config.setconfig.getSetAtoms
8990                 sets = root_config.sets
8991                 args = []
8992                 for x in favorites:
8993                         if not isinstance(x, basestring):
8994                                 continue
8995                         if x in ("system", "world"):
8996                                 x = SETPREFIX + x
8997                         if x.startswith(SETPREFIX):
8998                                 s = x[len(SETPREFIX):]
8999                                 if s not in sets:
9000                                         continue
9001                                 if s in self._sets:
9002                                         continue
9003                                 # Recursively expand sets so that containment tests in
9004                                 # self._get_parent_sets() properly match atoms in nested
9005                                 # sets (like if world contains system).
9006                                 expanded_set = InternalPackageSet(
9007                                         initial_atoms=getSetAtoms(s))
9008                                 self._sets[s] = expanded_set
9009                                 args.append(SetArg(arg=x, set=expanded_set,
9010                                         root_config=root_config))
9011                         else:
9012                                 if not portage.isvalidatom(x):
9013                                         continue
9014                                 args.append(AtomArg(arg=x, atom=x,
9015                                         root_config=root_config))
9016
9017                 self._set_args(args)
9018                 return args
9019
9020         class UnsatisfiedResumeDep(portage.exception.PortageException):
9021                 """
9022                 A dependency of a resume list is not installed. This
9023                 can occur when a required package is dropped from the
9024                 merge list via --skipfirst.
9025                 """
9026                 def __init__(self, depgraph, value):
9027                         portage.exception.PortageException.__init__(self, value)
9028                         self.depgraph = depgraph
9029
9030         class _internal_exception(portage.exception.PortageException):
9031                 def __init__(self, value=""):
9032                         portage.exception.PortageException.__init__(self, value)
9033
9034         class _unknown_internal_error(_internal_exception):
9035                 """
9036                 Used by the depgraph internally to terminate graph creation.
9037                 The specific reason for the failure should have been dumped
9038                 to stderr, unfortunately, the exact reason for the failure
9039                 may not be known.
9040                 """
9041
9042         class _serialize_tasks_retry(_internal_exception):
9043                 """
9044                 This is raised by the _serialize_tasks() method when it needs to
9045                 be called again for some reason. The only case that it's currently
9046                 used for is when neglected dependencies need to be added to the
9047                 graph in order to avoid making a potentially unsafe decision.
9048                 """
9049
9050         class _dep_check_composite_db(portage.dbapi):
9051                 """
9052                 A dbapi-like interface that is optimized for use in dep_check() calls.
9053                 This is built on top of the existing depgraph package selection logic.
9054                 Some packages that have been added to the graph may be masked from this
9055                 view in order to influence the atom preference selection that occurs
9056                 via dep_check().
9057                 """
9058                 def __init__(self, depgraph, root):
9059                         portage.dbapi.__init__(self)
9060                         self._depgraph = depgraph
9061                         self._root = root
9062                         self._match_cache = {}
9063                         self._cpv_pkg_map = {}
9064
9065                 def _clear_cache(self):
9066                         self._match_cache.clear()
9067                         self._cpv_pkg_map.clear()
9068
9069                 def match(self, atom):
9070                         ret = self._match_cache.get(atom)
9071                         if ret is not None:
9072                                 return ret[:]
9073                         orig_atom = atom
9074                         if "/" not in atom:
9075                                 atom = self._dep_expand(atom)
9076                         pkg, existing = self._depgraph._select_package(self._root, atom)
9077                         if not pkg:
9078                                 ret = []
9079                         else:
9080                                 # Return the highest available from select_package() as well as
9081                                 # any matching slots in the graph db.
9082                                 slots = set()
9083                                 slots.add(pkg.metadata["SLOT"])
9084                                 atom_cp = portage.dep_getkey(atom)
9085                                 if pkg.cp.startswith("virtual/"):
9086                                         # For new-style virtual lookahead that occurs inside
9087                                         # dep_check(), examine all slots. This is needed
9088                                         # so that newer slots will not unnecessarily be pulled in
9089                                         # when a satisfying lower slot is already installed. For
9090                                         # example, if virtual/jdk-1.4 is satisfied via kaffe then
9091                                         # there's no need to pull in a newer slot to satisfy a
9092                                         # virtual/jdk dependency.
9093                                         for db, pkg_type, built, installed, db_keys in \
9094                                                 self._depgraph._filtered_trees[self._root]["dbs"]:
9095                                                 for cpv in db.match(atom):
9096                                                         if portage.cpv_getkey(cpv) != pkg.cp:
9097                                                                 continue
9098                                                         slots.add(db.aux_get(cpv, ["SLOT"])[0])
9099                                 ret = []
9100                                 if self._visible(pkg):
9101                                         self._cpv_pkg_map[pkg.cpv] = pkg
9102                                         ret.append(pkg.cpv)
9103                                 slots.remove(pkg.metadata["SLOT"])
9104                                 while slots:
9105                                         slot_atom = "%s:%s" % (atom_cp, slots.pop())
9106                                         pkg, existing = self._depgraph._select_package(
9107                                                 self._root, slot_atom)
9108                                         if not pkg:
9109                                                 continue
9110                                         if not self._visible(pkg):
9111                                                 continue
9112                                         self._cpv_pkg_map[pkg.cpv] = pkg
9113                                         ret.append(pkg.cpv)
9114                                 if ret:
9115                                         self._cpv_sort_ascending(ret)
9116                         self._match_cache[orig_atom] = ret
9117                         return ret[:]
9118
9119                 def _visible(self, pkg):
9120                         if pkg.installed and "selective" not in self._depgraph.myparams:
9121                                 try:
9122                                         arg = self._depgraph._iter_atoms_for_pkg(pkg).next()
9123                                 except (StopIteration, portage.exception.InvalidDependString):
9124                                         arg = None
9125                                 if arg:
9126                                         return False
9127                         if pkg.installed:
9128                                 try:
9129                                         if not visible(
9130                                                 self._depgraph.pkgsettings[pkg.root], pkg):
9131                                                 return False
9132                                 except portage.exception.InvalidDependString:
9133                                         pass
9134                         in_graph = self._depgraph._slot_pkg_map[
9135                                 self._root].get(pkg.slot_atom)
9136                         if in_graph is None:
9137                                 # Mask choices for packages which are not the highest visible
9138                                 # version within their slot (since they usually trigger slot
9139                                 # conflicts).
9140                                 highest_visible, in_graph = self._depgraph._select_package(
9141                                         self._root, pkg.slot_atom)
9142                                 if pkg != highest_visible:
9143                                         return False
9144                         elif in_graph != pkg:
9145                                 # Mask choices for packages that would trigger a slot
9146                                 # conflict with a previously selected package.
9147                                 return False
9148                         return True
9149
9150                 def _dep_expand(self, atom):
9151                         """
9152                         This is only needed for old installed packages that may
9153                         contain atoms that are not fully qualified with a specific
9154                         category. Emulate the cpv_expand() function that's used by
9155                         dbapi.match() in cases like this. If there are multiple
9156                         matches, it's often due to a new-style virtual that has
9157                         been added, so try to filter those out to avoid raising
9158                         a ValueError.
9159                         """
9160                         root_config = self._depgraph.roots[self._root]
9161                         orig_atom = atom
9162                         expanded_atoms = self._depgraph._dep_expand(root_config, atom)
9163                         if len(expanded_atoms) > 1:
9164                                 non_virtual_atoms = []
9165                                 for x in expanded_atoms:
9166                                         if not portage.dep_getkey(x).startswith("virtual/"):
9167                                                 non_virtual_atoms.append(x)
9168                                 if len(non_virtual_atoms) == 1:
9169                                         expanded_atoms = non_virtual_atoms
9170                         if len(expanded_atoms) > 1:
9171                                 # compatible with portage.cpv_expand()
9172                                 raise portage.exception.AmbiguousPackageName(
9173                                         [portage.dep_getkey(x) for x in expanded_atoms])
9174                         if expanded_atoms:
9175                                 atom = expanded_atoms[0]
9176                         else:
9177                                 null_atom = insert_category_into_atom(atom, "null")
9178                                 null_cp = portage.dep_getkey(null_atom)
9179                                 cat, atom_pn = portage.catsplit(null_cp)
9180                                 virts_p = root_config.settings.get_virts_p().get(atom_pn)
9181                                 if virts_p:
9182                                         # Allow the resolver to choose which virtual.
9183                                         atom = insert_category_into_atom(atom, "virtual")
9184                                 else:
9185                                         atom = insert_category_into_atom(atom, "null")
9186                         return atom
9187
9188                 def aux_get(self, cpv, wants):
9189                         metadata = self._cpv_pkg_map[cpv].metadata
9190                         return [metadata.get(x, "") for x in wants]
9191
9192 class RepoDisplay(object):
9193         def __init__(self, roots):
9194                 self._shown_repos = {}
9195                 self._unknown_repo = False
9196                 repo_paths = set()
9197                 for root_config in roots.itervalues():
9198                         portdir = root_config.settings.get("PORTDIR")
9199                         if portdir:
9200                                 repo_paths.add(portdir)
9201                         overlays = root_config.settings.get("PORTDIR_OVERLAY")
9202                         if overlays:
9203                                 repo_paths.update(overlays.split())
9204                 repo_paths = list(repo_paths)
9205                 self._repo_paths = repo_paths
9206                 self._repo_paths_real = [ os.path.realpath(repo_path) \
9207                         for repo_path in repo_paths ]
9208
9209                 # pre-allocate index for PORTDIR so that it always has index 0.
9210                 for root_config in roots.itervalues():
9211                         portdb = root_config.trees["porttree"].dbapi
9212                         portdir = portdb.porttree_root
9213                         if portdir:
9214                                 self.repoStr(portdir)
9215
9216         def repoStr(self, repo_path_real):
9217                 real_index = -1
9218                 if repo_path_real:
9219                         real_index = self._repo_paths_real.index(repo_path_real)
9220                 if real_index == -1:
9221                         s = "?"
9222                         self._unknown_repo = True
9223                 else:
9224                         shown_repos = self._shown_repos
9225                         repo_paths = self._repo_paths
9226                         repo_path = repo_paths[real_index]
9227                         index = shown_repos.get(repo_path)
9228                         if index is None:
9229                                 index = len(shown_repos)
9230                                 shown_repos[repo_path] = index
9231                         s = str(index)
9232                 return s
9233
9234         def __str__(self):
9235                 output = []
9236                 shown_repos = self._shown_repos
9237                 unknown_repo = self._unknown_repo
9238                 if shown_repos or self._unknown_repo:
9239                         output.append("Portage tree and overlays:\n")
9240                 show_repo_paths = list(shown_repos)
9241                 for repo_path, repo_index in shown_repos.iteritems():
9242                         show_repo_paths[repo_index] = repo_path
9243                 if show_repo_paths:
9244                         for index, repo_path in enumerate(show_repo_paths):
9245                                 output.append(" "+teal("["+str(index)+"]")+" %s\n" % repo_path)
9246                 if unknown_repo:
9247                         output.append(" "+teal("[?]") + \
9248                                 " indicates that the source repository could not be determined\n")
9249                 return "".join(output)
9250
9251 class PackageCounters(object):
9252
9253         def __init__(self):
9254                 self.upgrades   = 0
9255                 self.downgrades = 0
9256                 self.new        = 0
9257                 self.newslot    = 0
9258                 self.reinst     = 0
9259                 self.uninst     = 0
9260                 self.blocks     = 0
9261                 self.blocks_satisfied         = 0
9262                 self.totalsize  = 0
9263                 self.restrict_fetch           = 0
9264                 self.restrict_fetch_satisfied = 0
9265                 self.interactive              = 0
9266
9267         def __str__(self):
9268                 total_installs = self.upgrades + self.downgrades + self.newslot + self.new + self.reinst
9269                 myoutput = []
9270                 details = []
9271                 myoutput.append("Total: %s package" % total_installs)
9272                 if total_installs != 1:
9273                         myoutput.append("s")
9274                 if total_installs != 0:
9275                         myoutput.append(" (")
9276                 if self.upgrades > 0:
9277                         details.append("%s upgrade" % self.upgrades)
9278                         if self.upgrades > 1:
9279                                 details[-1] += "s"
9280                 if self.downgrades > 0:
9281                         details.append("%s downgrade" % self.downgrades)
9282                         if self.downgrades > 1:
9283                                 details[-1] += "s"
9284                 if self.new > 0:
9285                         details.append("%s new" % self.new)
9286                 if self.newslot > 0:
9287                         details.append("%s in new slot" % self.newslot)
9288                         if self.newslot > 1:
9289                                 details[-1] += "s"
9290                 if self.reinst > 0:
9291                         details.append("%s reinstall" % self.reinst)
9292                         if self.reinst > 1:
9293                                 details[-1] += "s"
9294                 if self.uninst > 0:
9295                         details.append("%s uninstall" % self.uninst)
9296                         if self.uninst > 1:
9297                                 details[-1] += "s"
9298                 if self.interactive > 0:
9299                         details.append("%s %s" % (self.interactive,
9300                                 colorize("WARN", "interactive")))
9301                 myoutput.append(", ".join(details))
9302                 if total_installs != 0:
9303                         myoutput.append(")")
9304                 myoutput.append(", Size of downloads: %s" % format_size(self.totalsize))
9305                 if self.restrict_fetch:
9306                         myoutput.append("\nFetch Restriction: %s package" % \
9307                                 self.restrict_fetch)
9308                         if self.restrict_fetch > 1:
9309                                 myoutput.append("s")
9310                 if self.restrict_fetch_satisfied < self.restrict_fetch:
9311                         myoutput.append(bad(" (%s unsatisfied)") % \
9312                                 (self.restrict_fetch - self.restrict_fetch_satisfied))
9313                 if self.blocks > 0:
9314                         myoutput.append("\nConflict: %s block" % \
9315                                 self.blocks)
9316                         if self.blocks > 1:
9317                                 myoutput.append("s")
9318                         if self.blocks_satisfied < self.blocks:
9319                                 myoutput.append(bad(" (%s unsatisfied)") % \
9320                                         (self.blocks - self.blocks_satisfied))
9321                 return "".join(myoutput)
9322
9323 class UseFlagDisplay(object):
9324
9325         __slots__ = ('name', 'enabled', 'forced')
9326
9327         def __init__(self, name, enabled, forced):
9328                 self.name = name
9329                 self.enabled = enabled
9330                 self.forced = forced
9331
9332         def __str__(self):
9333                 s = self.name
9334                 if self.enabled:
9335                         s = red(s)
9336                 else:
9337                         s = '-' + s
9338                         s = blue(s)
9339                 if self.forced:
9340                         s = '(%s)' % s
9341                 return s
9342
9343         def _cmp_combined(a, b):
9344                 """
9345                 Sort by name, combining enabled and disabled flags.
9346                 """
9347                 return (a.name > b.name) - (a.name < b.name)
9348
9349         sort_combined = cmp_sort_key(_cmp_combined)
9350         del _cmp_combined
9351
9352         def _cmp_separated(a, b):
9353                 """
9354                 Sort by name, separating enabled flags from disabled flags.
9355                 """
9356                 enabled_diff = b.enabled - a.enabled
9357                 if enabled_diff:
9358                         return enabled_diff
9359                 return (a.name > b.name) - (a.name < b.name)
9360
9361         sort_separated = cmp_sort_key(_cmp_separated)
9362         del _cmp_separated
9363
9364 class PollSelectAdapter(PollConstants):
9365
9366         """
9367         Use select to emulate a poll object, for
9368         systems that don't support poll().
9369         """
9370
9371         def __init__(self):
9372                 self._registered = {}
9373                 self._select_args = [[], [], []]
9374
9375         def register(self, fd, *args):
9376                 """
9377                 Only POLLIN is currently supported!
9378                 """
9379                 if len(args) > 1:
9380                         raise TypeError(
9381                                 "register expected at most 2 arguments, got " + \
9382                                 repr(1 + len(args)))
9383
9384                 eventmask = PollConstants.POLLIN | \
9385                         PollConstants.POLLPRI | PollConstants.POLLOUT
9386                 if args:
9387                         eventmask = args[0]
9388
9389                 self._registered[fd] = eventmask
9390                 self._select_args = None
9391
9392         def unregister(self, fd):
9393                 self._select_args = None
9394                 del self._registered[fd]
9395
9396         def poll(self, *args):
9397                 if len(args) > 1:
9398                         raise TypeError(
9399                                 "poll expected at most 2 arguments, got " + \
9400                                 repr(1 + len(args)))
9401
9402                 timeout = None
9403                 if args:
9404                         timeout = args[0]
9405
9406                 select_args = self._select_args
9407                 if select_args is None:
9408                         select_args = [self._registered.keys(), [], []]
9409
9410                 if timeout is not None:
9411                         select_args = select_args[:]
9412                         # Translate poll() timeout args to select() timeout args:
9413                         #
9414                         #          | units        | value(s) for indefinite block
9415                         # ---------|--------------|------------------------------
9416                         #   poll   | milliseconds | omitted, negative, or None
9417                         # ---------|--------------|------------------------------
9418                         #   select | seconds      | omitted
9419                         # ---------|--------------|------------------------------
9420
9421                         if timeout is not None and timeout < 0:
9422                                 timeout = None
9423                         if timeout is not None:
9424                                 select_args.append(timeout / 1000)
9425
9426                 select_events = select.select(*select_args)
9427                 poll_events = []
9428                 for fd in select_events[0]:
9429                         poll_events.append((fd, PollConstants.POLLIN))
9430                 return poll_events
9431
9432 class SequentialTaskQueue(SlotObject):
9433
9434         __slots__ = ("max_jobs", "running_tasks") + \
9435                 ("_dirty", "_scheduling", "_task_queue")
9436
9437         def __init__(self, **kwargs):
9438                 SlotObject.__init__(self, **kwargs)
9439                 self._task_queue = deque()
9440                 self.running_tasks = set()
9441                 if self.max_jobs is None:
9442                         self.max_jobs = 1
9443                 self._dirty = True
9444
9445         def add(self, task):
9446                 self._task_queue.append(task)
9447                 self._dirty = True
9448
9449         def addFront(self, task):
9450                 self._task_queue.appendleft(task)
9451                 self._dirty = True
9452
9453         def schedule(self):
9454
9455                 if not self._dirty:
9456                         return False
9457
9458                 if not self:
9459                         return False
9460
9461                 if self._scheduling:
9462                         # Ignore any recursive schedule() calls triggered via
9463                         # self._task_exit().
9464                         return False
9465
9466                 self._scheduling = True
9467
9468                 task_queue = self._task_queue
9469                 running_tasks = self.running_tasks
9470                 max_jobs = self.max_jobs
9471                 state_changed = False
9472
9473                 while task_queue and \
9474                         (max_jobs is True or len(running_tasks) < max_jobs):
9475                         task = task_queue.popleft()
9476                         cancelled = getattr(task, "cancelled", None)
9477                         if not cancelled:
9478                                 running_tasks.add(task)
9479                                 task.addExitListener(self._task_exit)
9480                                 task.start()
9481                         state_changed = True
9482
9483                 self._dirty = False
9484                 self._scheduling = False
9485
9486                 return state_changed
9487
9488         def _task_exit(self, task):
9489                 """
9490                 Since we can always rely on exit listeners being called, the set of
9491                 running tasks is always pruned automatically and there is never any need
9492                 to actively prune it.
9493                 """
9494                 self.running_tasks.remove(task)
9495                 if self._task_queue:
9496                         self._dirty = True
9497
9498         def clear(self):
9499                 self._task_queue.clear()
9500                 running_tasks = self.running_tasks
9501                 while running_tasks:
9502                         task = running_tasks.pop()
9503                         task.removeExitListener(self._task_exit)
9504                         task.cancel()
9505                 self._dirty = False
9506
9507         def __nonzero__(self):
9508                 return bool(self._task_queue or self.running_tasks)
9509
9510         def __len__(self):
9511                 return len(self._task_queue) + len(self.running_tasks)
9512
9513 _can_poll_device = None
9514
9515 def can_poll_device():
9516         """
9517         Test if it's possible to use poll() on a device such as a pty. This
9518         is known to fail on Darwin.
9519         @rtype: bool
9520         @returns: True if poll() on a device succeeds, False otherwise.
9521         """
9522
9523         global _can_poll_device
9524         if _can_poll_device is not None:
9525                 return _can_poll_device
9526
9527         if not hasattr(select, "poll"):
9528                 _can_poll_device = False
9529                 return _can_poll_device
9530
9531         try:
9532                 dev_null = open('/dev/null', 'rb')
9533         except IOError:
9534                 _can_poll_device = False
9535                 return _can_poll_device
9536
9537         p = select.poll()
9538         p.register(dev_null.fileno(), PollConstants.POLLIN)
9539
9540         invalid_request = False
9541         for f, event in p.poll():
9542                 if event & PollConstants.POLLNVAL:
9543                         invalid_request = True
9544                         break
9545         dev_null.close()
9546
9547         _can_poll_device = not invalid_request
9548         return _can_poll_device
9549
9550 def create_poll_instance():
9551         """
9552         Create an instance of select.poll, or an instance of
9553         PollSelectAdapter there is no poll() implementation or
9554         it is broken somehow.
9555         """
9556         if can_poll_device():
9557                 return select.poll()
9558         return PollSelectAdapter()
9559
9560 getloadavg = getattr(os, "getloadavg", None)
9561 if getloadavg is None:
9562         def getloadavg():
9563                 """
9564                 Uses /proc/loadavg to emulate os.getloadavg().
9565                 Raises OSError if the load average was unobtainable.
9566                 """
9567                 try:
9568                         loadavg_str = open('/proc/loadavg').readline()
9569                 except IOError:
9570                         # getloadavg() is only supposed to raise OSError, so convert
9571                         raise OSError('unknown')
9572                 loadavg_split = loadavg_str.split()
9573                 if len(loadavg_split) < 3:
9574                         raise OSError('unknown')
9575                 loadavg_floats = []
9576                 for i in xrange(3):
9577                         try:
9578                                 loadavg_floats.append(float(loadavg_split[i]))
9579                         except ValueError:
9580                                 raise OSError('unknown')
9581                 return tuple(loadavg_floats)
9582
9583 class PollScheduler(object):
9584
9585         class _sched_iface_class(SlotObject):
9586                 __slots__ = ("register", "schedule", "unregister")
9587
9588         def __init__(self):
9589                 self._max_jobs = 1
9590                 self._max_load = None
9591                 self._jobs = 0
9592                 self._poll_event_queue = []
9593                 self._poll_event_handlers = {}
9594                 self._poll_event_handler_ids = {}
9595                 # Increment id for each new handler.
9596                 self._event_handler_id = 0
9597                 self._poll_obj = create_poll_instance()
9598                 self._scheduling = False
9599
9600         def _schedule(self):
9601                 """
9602                 Calls _schedule_tasks() and automatically returns early from
9603                 any recursive calls to this method that the _schedule_tasks()
9604                 call might trigger. This makes _schedule() safe to call from
9605                 inside exit listeners.
9606                 """
9607                 if self._scheduling:
9608                         return False
9609                 self._scheduling = True
9610                 try:
9611                         return self._schedule_tasks()
9612                 finally:
9613                         self._scheduling = False
9614
9615         def _running_job_count(self):
9616                 return self._jobs
9617
9618         def _can_add_job(self):
9619                 max_jobs = self._max_jobs
9620                 max_load = self._max_load
9621
9622                 if self._max_jobs is not True and \
9623                         self._running_job_count() >= self._max_jobs:
9624                         return False
9625
9626                 if max_load is not None and \
9627                         (max_jobs is True or max_jobs > 1) and \
9628                         self._running_job_count() >= 1:
9629                         try:
9630                                 avg1, avg5, avg15 = getloadavg()
9631                         except OSError:
9632                                 return False
9633
9634                         if avg1 >= max_load:
9635                                 return False
9636
9637                 return True
9638
9639         def _poll(self, timeout=None):
9640                 """
9641                 All poll() calls pass through here. The poll events
9642                 are added directly to self._poll_event_queue.
9643                 In order to avoid endless blocking, this raises
9644                 StopIteration if timeout is None and there are
9645                 no file descriptors to poll.
9646                 """
9647                 if not self._poll_event_handlers:
9648                         self._schedule()
9649                         if timeout is None and \
9650                                 not self._poll_event_handlers:
9651                                 raise StopIteration(
9652                                         "timeout is None and there are no poll() event handlers")
9653
9654                 # The following error is known to occur with Linux kernel versions
9655                 # less than 2.6.24:
9656                 #
9657                 #   select.error: (4, 'Interrupted system call')
9658                 #
9659                 # This error has been observed after a SIGSTOP, followed by SIGCONT.
9660                 # Treat it similar to EAGAIN if timeout is None, otherwise just return
9661                 # without any events.
9662                 while True:
9663                         try:
9664                                 self._poll_event_queue.extend(self._poll_obj.poll(timeout))
9665                                 break
9666                         except select.error, e:
9667                                 writemsg_level("\n!!! select error: %s\n" % (e,),
9668                                         level=logging.ERROR, noiselevel=-1)
9669                                 del e
9670                                 if timeout is not None:
9671                                         break
9672
9673         def _next_poll_event(self, timeout=None):
9674                 """
9675                 Since the _schedule_wait() loop is called by event
9676                 handlers from _poll_loop(), maintain a central event
9677                 queue for both of them to share events from a single
9678                 poll() call. In order to avoid endless blocking, this
9679                 raises StopIteration if timeout is None and there are
9680                 no file descriptors to poll.
9681                 """
9682                 if not self._poll_event_queue:
9683                         self._poll(timeout)
9684                 return self._poll_event_queue.pop()
9685
9686         def _poll_loop(self):
9687
9688                 event_handlers = self._poll_event_handlers
9689                 event_handled = False
9690
9691                 try:
9692                         while event_handlers:
9693                                 f, event = self._next_poll_event()
9694                                 handler, reg_id = event_handlers[f]
9695                                 handler(f, event)
9696                                 event_handled = True
9697                 except StopIteration:
9698                         event_handled = True
9699
9700                 if not event_handled:
9701                         raise AssertionError("tight loop")
9702
9703         def _schedule_yield(self):
9704                 """
9705                 Schedule for a short period of time chosen by the scheduler based
9706                 on internal state. Synchronous tasks should call this periodically
9707                 in order to allow the scheduler to service pending poll events. The
9708                 scheduler will call poll() exactly once, without blocking, and any
9709                 resulting poll events will be serviced.
9710                 """
9711                 event_handlers = self._poll_event_handlers
9712                 events_handled = 0
9713
9714                 if not event_handlers:
9715                         return bool(events_handled)
9716
9717                 if not self._poll_event_queue:
9718                         self._poll(0)
9719
9720                 try:
9721                         while event_handlers and self._poll_event_queue:
9722                                 f, event = self._next_poll_event()
9723                                 handler, reg_id = event_handlers[f]
9724                                 handler(f, event)
9725                                 events_handled += 1
9726                 except StopIteration:
9727                         events_handled += 1
9728
9729                 return bool(events_handled)
9730
9731         def _register(self, f, eventmask, handler):
9732                 """
9733                 @rtype: Integer
9734                 @return: A unique registration id, for use in schedule() or
9735                         unregister() calls.
9736                 """
9737                 if f in self._poll_event_handlers:
9738                         raise AssertionError("fd %d is already registered" % f)
9739                 self._event_handler_id += 1
9740                 reg_id = self._event_handler_id
9741                 self._poll_event_handler_ids[reg_id] = f
9742                 self._poll_event_handlers[f] = (handler, reg_id)
9743                 self._poll_obj.register(f, eventmask)
9744                 return reg_id
9745
9746         def _unregister(self, reg_id):
9747                 f = self._poll_event_handler_ids[reg_id]
9748                 self._poll_obj.unregister(f)
9749                 del self._poll_event_handlers[f]
9750                 del self._poll_event_handler_ids[reg_id]
9751
9752         def _schedule_wait(self, wait_ids):
9753                 """
9754                 Schedule until wait_id is not longer registered
9755                 for poll() events.
9756                 @type wait_id: int
9757                 @param wait_id: a task id to wait for
9758                 """
9759                 event_handlers = self._poll_event_handlers
9760                 handler_ids = self._poll_event_handler_ids
9761                 event_handled = False
9762
9763                 if isinstance(wait_ids, int):
9764                         wait_ids = frozenset([wait_ids])
9765
9766                 try:
9767                         while wait_ids.intersection(handler_ids):
9768                                 f, event = self._next_poll_event()
9769                                 handler, reg_id = event_handlers[f]
9770                                 handler(f, event)
9771                                 event_handled = True
9772                 except StopIteration:
9773                         event_handled = True
9774
9775                 return event_handled
9776
9777 class QueueScheduler(PollScheduler):
9778
9779         """
9780         Add instances of SequentialTaskQueue and then call run(). The
9781         run() method returns when no tasks remain.
9782         """
9783
9784         def __init__(self, max_jobs=None, max_load=None):
9785                 PollScheduler.__init__(self)
9786
9787                 if max_jobs is None:
9788                         max_jobs = 1
9789
9790                 self._max_jobs = max_jobs
9791                 self._max_load = max_load
9792                 self.sched_iface = self._sched_iface_class(
9793                         register=self._register,
9794                         schedule=self._schedule_wait,
9795                         unregister=self._unregister)
9796
9797                 self._queues = []
9798                 self._schedule_listeners = []
9799
9800         def add(self, q):
9801                 self._queues.append(q)
9802
9803         def remove(self, q):
9804                 self._queues.remove(q)
9805
9806         def run(self):
9807
9808                 while self._schedule():
9809                         self._poll_loop()
9810
9811                 while self._running_job_count():
9812                         self._poll_loop()
9813
9814         def _schedule_tasks(self):
9815                 """
9816                 @rtype: bool
9817                 @returns: True if there may be remaining tasks to schedule,
9818                         False otherwise.
9819                 """
9820                 while self._can_add_job():
9821                         n = self._max_jobs - self._running_job_count()
9822                         if n < 1:
9823                                 break
9824
9825                         if not self._start_next_job(n):
9826                                 return False
9827
9828                 for q in self._queues:
9829                         if q:
9830                                 return True
9831                 return False
9832
9833         def _running_job_count(self):
9834                 job_count = 0
9835                 for q in self._queues:
9836                         job_count += len(q.running_tasks)
9837                 self._jobs = job_count
9838                 return job_count
9839
9840         def _start_next_job(self, n=1):
9841                 started_count = 0
9842                 for q in self._queues:
9843                         initial_job_count = len(q.running_tasks)
9844                         q.schedule()
9845                         final_job_count = len(q.running_tasks)
9846                         if final_job_count > initial_job_count:
9847                                 started_count += (final_job_count - initial_job_count)
9848                         if started_count >= n:
9849                                 break
9850                 return started_count
9851
9852 class TaskScheduler(object):
9853
9854         """
9855         A simple way to handle scheduling of AsynchrousTask instances. Simply
9856         add tasks and call run(). The run() method returns when no tasks remain.
9857         """
9858
9859         def __init__(self, max_jobs=None, max_load=None):
9860                 self._queue = SequentialTaskQueue(max_jobs=max_jobs)
9861                 self._scheduler = QueueScheduler(
9862                         max_jobs=max_jobs, max_load=max_load)
9863                 self.sched_iface = self._scheduler.sched_iface
9864                 self.run = self._scheduler.run
9865                 self._scheduler.add(self._queue)
9866
9867         def add(self, task):
9868                 self._queue.add(task)
9869
9870 class JobStatusDisplay(object):
9871
9872         _bound_properties = ("curval", "failed", "running")
9873         _jobs_column_width = 48
9874
9875         # Don't update the display unless at least this much
9876         # time has passed, in units of seconds.
9877         _min_display_latency = 2
9878
9879         _default_term_codes = {
9880                 'cr'  : '\r',
9881                 'el'  : '\x1b[K',
9882                 'nel' : '\n',
9883         }
9884
9885         _termcap_name_map = {
9886                 'carriage_return' : 'cr',
9887                 'clr_eol'         : 'el',
9888                 'newline'         : 'nel',
9889         }
9890
9891         def __init__(self, out=sys.stdout, quiet=False, xterm_titles=True):
9892                 object.__setattr__(self, "out", out)
9893                 object.__setattr__(self, "quiet", quiet)
9894                 object.__setattr__(self, "xterm_titles", xterm_titles)
9895                 object.__setattr__(self, "maxval", 0)
9896                 object.__setattr__(self, "merges", 0)
9897                 object.__setattr__(self, "_changed", False)
9898                 object.__setattr__(self, "_displayed", False)
9899                 object.__setattr__(self, "_last_display_time", 0)
9900                 object.__setattr__(self, "width", 80)
9901                 self.reset()
9902
9903                 isatty = hasattr(out, "isatty") and out.isatty()
9904                 object.__setattr__(self, "_isatty", isatty)
9905                 if not isatty or not self._init_term():
9906                         term_codes = {}
9907                         for k, capname in self._termcap_name_map.iteritems():
9908                                 term_codes[k] = self._default_term_codes[capname]
9909                         object.__setattr__(self, "_term_codes", term_codes)
9910                 encoding = sys.getdefaultencoding()
9911                 for k, v in self._term_codes.items():
9912                         if not isinstance(v, basestring):
9913                                 self._term_codes[k] = v.decode(encoding, 'replace')
9914
9915         def _init_term(self):
9916                 """
9917                 Initialize term control codes.
9918                 @rtype: bool
9919                 @returns: True if term codes were successfully initialized,
9920                         False otherwise.
9921                 """
9922
9923                 term_type = os.environ.get("TERM", "vt100")
9924                 tigetstr = None
9925
9926                 try:
9927                         import curses
9928                         try:
9929                                 curses.setupterm(term_type, self.out.fileno())
9930                                 tigetstr = curses.tigetstr
9931                         except curses.error:
9932                                 pass
9933                 except ImportError:
9934                         pass
9935
9936                 if tigetstr is None:
9937                         return False
9938
9939                 term_codes = {}
9940                 for k, capname in self._termcap_name_map.iteritems():
9941                         code = tigetstr(capname)
9942                         if code is None:
9943                                 code = self._default_term_codes[capname]
9944                         term_codes[k] = code
9945                 object.__setattr__(self, "_term_codes", term_codes)
9946                 return True
9947
9948         def _format_msg(self, msg):
9949                 return ">>> %s" % msg
9950
9951         def _erase(self):
9952                 self.out.write(
9953                         self._term_codes['carriage_return'] + \
9954                         self._term_codes['clr_eol'])
9955                 self.out.flush()
9956                 self._displayed = False
9957
9958         def _display(self, line):
9959                 self.out.write(line)
9960                 self.out.flush()
9961                 self._displayed = True
9962
9963         def _update(self, msg):
9964
9965                 out = self.out
9966                 if not self._isatty:
9967                         out.write(self._format_msg(msg) + self._term_codes['newline'])
9968                         self.out.flush()
9969                         self._displayed = True
9970                         return
9971
9972                 if self._displayed:
9973                         self._erase()
9974
9975                 self._display(self._format_msg(msg))
9976
9977         def displayMessage(self, msg):
9978
9979                 was_displayed = self._displayed
9980
9981                 if self._isatty and self._displayed:
9982                         self._erase()
9983
9984                 self.out.write(self._format_msg(msg) + self._term_codes['newline'])
9985                 self.out.flush()
9986                 self._displayed = False
9987
9988                 if was_displayed:
9989                         self._changed = True
9990                         self.display()
9991
9992         def reset(self):
9993                 self.maxval = 0
9994                 self.merges = 0
9995                 for name in self._bound_properties:
9996                         object.__setattr__(self, name, 0)
9997
9998                 if self._displayed:
9999                         self.out.write(self._term_codes['newline'])
10000                         self.out.flush()
10001                         self._displayed = False
10002
10003         def __setattr__(self, name, value):
10004                 old_value = getattr(self, name)
10005                 if value == old_value:
10006                         return
10007                 object.__setattr__(self, name, value)
10008                 if name in self._bound_properties:
10009                         self._property_change(name, old_value, value)
10010
10011         def _property_change(self, name, old_value, new_value):
10012                 self._changed = True
10013                 self.display()
10014
10015         def _load_avg_str(self):
10016                 try:
10017                         avg = getloadavg()
10018                 except OSError:
10019                         return 'unknown'
10020
10021                 max_avg = max(avg)
10022
10023                 if max_avg < 10:
10024                         digits = 2
10025                 elif max_avg < 100:
10026                         digits = 1
10027                 else:
10028                         digits = 0
10029
10030                 return ", ".join(("%%.%df" % digits ) % x for x in avg)
10031
10032         def display(self):
10033                 """
10034                 Display status on stdout, but only if something has
10035                 changed since the last call.
10036                 """
10037
10038                 if self.quiet:
10039                         return
10040
10041                 current_time = time.time()
10042                 time_delta = current_time - self._last_display_time
10043                 if self._displayed and \
10044                         not self._changed:
10045                         if not self._isatty:
10046                                 return
10047                         if time_delta < self._min_display_latency:
10048                                 return
10049
10050                 self._last_display_time = current_time
10051                 self._changed = False
10052                 self._display_status()
10053
10054         def _display_status(self):
10055                 # Don't use len(self._completed_tasks) here since that also
10056                 # can include uninstall tasks.
10057                 curval_str = str(self.curval)
10058                 maxval_str = str(self.maxval)
10059                 running_str = str(self.running)
10060                 failed_str = str(self.failed)
10061                 load_avg_str = self._load_avg_str()
10062
10063                 color_output = StringIO()
10064                 plain_output = StringIO()
10065                 style_file = portage.output.ConsoleStyleFile(color_output)
10066                 style_file.write_listener = plain_output
10067                 style_writer = portage.output.StyleWriter(file=style_file, maxcol=9999)
10068                 style_writer.style_listener = style_file.new_styles
10069                 f = formatter.AbstractFormatter(style_writer)
10070
10071                 number_style = "INFORM"
10072                 f.add_literal_data("Jobs: ")
10073                 f.push_style(number_style)
10074                 f.add_literal_data(curval_str)
10075                 f.pop_style()
10076                 f.add_literal_data(" of ")
10077                 f.push_style(number_style)
10078                 f.add_literal_data(maxval_str)
10079                 f.pop_style()
10080                 f.add_literal_data(" complete")
10081
10082                 if self.running:
10083                         f.add_literal_data(", ")
10084                         f.push_style(number_style)
10085                         f.add_literal_data(running_str)
10086                         f.pop_style()
10087                         f.add_literal_data(" running")
10088
10089                 if self.failed:
10090                         f.add_literal_data(", ")
10091                         f.push_style(number_style)
10092                         f.add_literal_data(failed_str)
10093                         f.pop_style()
10094                         f.add_literal_data(" failed")
10095
10096                 padding = self._jobs_column_width - len(plain_output.getvalue())
10097                 if padding > 0:
10098                         f.add_literal_data(padding * " ")
10099
10100                 f.add_literal_data("Load avg: ")
10101                 f.add_literal_data(load_avg_str)
10102
10103                 # Truncate to fit width, to avoid making the terminal scroll if the
10104                 # line overflows (happens when the load average is large).
10105                 plain_output = plain_output.getvalue()
10106                 if self._isatty and len(plain_output) > self.width:
10107                         # Use plain_output here since it's easier to truncate
10108                         # properly than the color output which contains console
10109                         # color codes.
10110                         self._update(plain_output[:self.width])
10111                 else:
10112                         self._update(color_output.getvalue())
10113
10114                 if self.xterm_titles:
10115                         xtermTitle(" ".join(plain_output.split()))
10116
10117 class ProgressHandler(object):
10118         def __init__(self):
10119                 self.curval = 0
10120                 self.maxval = 0
10121                 self._last_update = 0
10122                 self.min_latency = 0.2
10123
10124         def onProgress(self, maxval, curval):
10125                 self.maxval = maxval
10126                 self.curval = curval
10127                 cur_time = time.time()
10128                 if cur_time - self._last_update >= self.min_latency:
10129                         self._last_update = cur_time
10130                         self.display()
10131
10132         def display(self):
10133                 raise NotImplementedError(self)
10134
10135 class Scheduler(PollScheduler):
10136
10137         _opts_ignore_blockers = \
10138                 frozenset(["--buildpkgonly",
10139                 "--fetchonly", "--fetch-all-uri",
10140                 "--nodeps", "--pretend"])
10141
10142         _opts_no_background = \
10143                 frozenset(["--pretend",
10144                 "--fetchonly", "--fetch-all-uri"])
10145
10146         _opts_no_restart = frozenset(["--buildpkgonly",
10147                 "--fetchonly", "--fetch-all-uri", "--pretend"])
10148
10149         _bad_resume_opts = set(["--ask", "--changelog",
10150                 "--resume", "--skipfirst"])
10151
10152         _fetch_log = "/var/log/emerge-fetch.log"
10153
10154         class _iface_class(SlotObject):
10155                 __slots__ = ("dblinkEbuildPhase", "dblinkDisplayMerge",
10156                         "dblinkElog", "dblinkEmergeLog", "fetch", "register", "schedule",
10157                         "scheduleSetup", "scheduleUnpack", "scheduleYield",
10158                         "unregister")
10159
10160         class _fetch_iface_class(SlotObject):
10161                 __slots__ = ("log_file", "schedule")
10162
10163         _task_queues_class = slot_dict_class(
10164                 ("merge", "jobs", "fetch", "unpack"), prefix="")
10165
10166         class _build_opts_class(SlotObject):
10167                 __slots__ = ("buildpkg", "buildpkgonly",
10168                         "fetch_all_uri", "fetchonly", "pretend")
10169
10170         class _binpkg_opts_class(SlotObject):
10171                 __slots__ = ("fetchonly", "getbinpkg", "pretend")
10172
10173         class _pkg_count_class(SlotObject):
10174                 __slots__ = ("curval", "maxval")
10175
10176         class _emerge_log_class(SlotObject):
10177                 __slots__ = ("xterm_titles",)
10178
10179                 def log(self, *pargs, **kwargs):
10180                         if not self.xterm_titles:
10181                                 # Avoid interference with the scheduler's status display.
10182                                 kwargs.pop("short_msg", None)
10183                         emergelog(self.xterm_titles, *pargs, **kwargs)
10184
10185         class _failed_pkg(SlotObject):
10186                 __slots__ = ("build_dir", "build_log", "pkg", "returncode")
10187
10188         class _ConfigPool(object):
10189                 """Interface for a task to temporarily allocate a config
10190                 instance from a pool. This allows a task to be constructed
10191                 long before the config instance actually becomes needed, like
10192                 when prefetchers are constructed for the whole merge list."""
10193                 __slots__ = ("_root", "_allocate", "_deallocate")
10194                 def __init__(self, root, allocate, deallocate):
10195                         self._root = root
10196                         self._allocate = allocate
10197                         self._deallocate = deallocate
10198                 def allocate(self):
10199                         return self._allocate(self._root)
10200                 def deallocate(self, settings):
10201                         self._deallocate(settings)
10202
10203         class _unknown_internal_error(portage.exception.PortageException):
10204                 """
10205                 Used internally to terminate scheduling. The specific reason for
10206                 the failure should have been dumped to stderr.
10207                 """
10208                 def __init__(self, value=""):
10209                         portage.exception.PortageException.__init__(self, value)
10210
10211         def __init__(self, settings, trees, mtimedb, myopts,
10212                 spinner, mergelist, favorites, digraph):
10213                 PollScheduler.__init__(self)
10214                 self.settings = settings
10215                 self.target_root = settings["ROOT"]
10216                 self.trees = trees
10217                 self.myopts = myopts
10218                 self._spinner = spinner
10219                 self._mtimedb = mtimedb
10220                 self._mergelist = mergelist
10221                 self._favorites = favorites
10222                 self._args_set = InternalPackageSet(favorites)
10223                 self._build_opts = self._build_opts_class()
10224                 for k in self._build_opts.__slots__:
10225                         setattr(self._build_opts, k, "--" + k.replace("_", "-") in myopts)
10226                 self._binpkg_opts = self._binpkg_opts_class()
10227                 for k in self._binpkg_opts.__slots__:
10228                         setattr(self._binpkg_opts, k, "--" + k.replace("_", "-") in myopts)
10229
10230                 self.curval = 0
10231                 self._logger = self._emerge_log_class()
10232                 self._task_queues = self._task_queues_class()
10233                 for k in self._task_queues.allowed_keys:
10234                         setattr(self._task_queues, k,
10235                                 SequentialTaskQueue())
10236
10237                 # Holds merges that will wait to be executed when no builds are
10238                 # executing. This is useful for system packages since dependencies
10239                 # on system packages are frequently unspecified.
10240                 self._merge_wait_queue = []
10241                 # Holds merges that have been transfered from the merge_wait_queue to
10242                 # the actual merge queue. They are removed from this list upon
10243                 # completion. Other packages can start building only when this list is
10244                 # empty.
10245                 self._merge_wait_scheduled = []
10246
10247                 # Holds system packages and their deep runtime dependencies. Before
10248                 # being merged, these packages go to merge_wait_queue, to be merged
10249                 # when no other packages are building.
10250                 self._deep_system_deps = set()
10251
10252                 # Holds packages to merge which will satisfy currently unsatisfied
10253                 # deep runtime dependencies of system packages. If this is not empty
10254                 # then no parallel builds will be spawned until it is empty. This
10255                 # minimizes the possibility that a build will fail due to the system
10256                 # being in a fragile state. For example, see bug #259954.
10257                 self._unsatisfied_system_deps = set()
10258
10259                 self._status_display = JobStatusDisplay(
10260                         xterm_titles=('notitles' not in settings.features))
10261                 self._max_load = myopts.get("--load-average")
10262                 max_jobs = myopts.get("--jobs")
10263                 if max_jobs is None:
10264                         max_jobs = 1
10265                 self._set_max_jobs(max_jobs)
10266
10267                 # The root where the currently running
10268                 # portage instance is installed.
10269                 self._running_root = trees["/"]["root_config"]
10270                 self.edebug = 0
10271                 if settings.get("PORTAGE_DEBUG", "") == "1":
10272                         self.edebug = 1
10273                 self.pkgsettings = {}
10274                 self._config_pool = {}
10275                 self._blocker_db = {}
10276                 for root in trees:
10277                         self._config_pool[root] = []
10278                         self._blocker_db[root] = BlockerDB(trees[root]["root_config"])
10279
10280                 fetch_iface = self._fetch_iface_class(log_file=self._fetch_log,
10281                         schedule=self._schedule_fetch)
10282                 self._sched_iface = self._iface_class(
10283                         dblinkEbuildPhase=self._dblink_ebuild_phase,
10284                         dblinkDisplayMerge=self._dblink_display_merge,
10285                         dblinkElog=self._dblink_elog,
10286                         dblinkEmergeLog=self._dblink_emerge_log,
10287                         fetch=fetch_iface, register=self._register,
10288                         schedule=self._schedule_wait,
10289                         scheduleSetup=self._schedule_setup,
10290                         scheduleUnpack=self._schedule_unpack,
10291                         scheduleYield=self._schedule_yield,
10292                         unregister=self._unregister)
10293
10294                 self._prefetchers = weakref.WeakValueDictionary()
10295                 self._pkg_queue = []
10296                 self._completed_tasks = set()
10297
10298                 self._failed_pkgs = []
10299                 self._failed_pkgs_all = []
10300                 self._failed_pkgs_die_msgs = []
10301                 self._post_mod_echo_msgs = []
10302                 self._parallel_fetch = False
10303                 merge_count = len([x for x in mergelist \
10304                         if isinstance(x, Package) and x.operation == "merge"])
10305                 self._pkg_count = self._pkg_count_class(
10306                         curval=0, maxval=merge_count)
10307                 self._status_display.maxval = self._pkg_count.maxval
10308
10309                 # The load average takes some time to respond when new
10310                 # jobs are added, so we need to limit the rate of adding
10311                 # new jobs.
10312                 self._job_delay_max = 10
10313                 self._job_delay_factor = 1.0
10314                 self._job_delay_exp = 1.5
10315                 self._previous_job_start_time = None
10316
10317                 self._set_digraph(digraph)
10318
10319                 # This is used to memoize the _choose_pkg() result when
10320                 # no packages can be chosen until one of the existing
10321                 # jobs completes.
10322                 self._choose_pkg_return_early = False
10323
10324                 features = self.settings.features
10325                 if "parallel-fetch" in features and \
10326                         not ("--pretend" in self.myopts or \
10327                         "--fetch-all-uri" in self.myopts or \
10328                         "--fetchonly" in self.myopts):
10329                         if "distlocks" not in features:
10330                                 portage.writemsg(red("!!!")+"\n", noiselevel=-1)
10331                                 portage.writemsg(red("!!!")+" parallel-fetching " + \
10332                                         "requires the distlocks feature enabled"+"\n",
10333                                         noiselevel=-1)
10334                                 portage.writemsg(red("!!!")+" you have it disabled, " + \
10335                                         "thus parallel-fetching is being disabled"+"\n",
10336                                         noiselevel=-1)
10337                                 portage.writemsg(red("!!!")+"\n", noiselevel=-1)
10338                         elif len(mergelist) > 1:
10339                                 self._parallel_fetch = True
10340
10341                 if self._parallel_fetch:
10342                                 # clear out existing fetch log if it exists
10343                                 try:
10344                                         open(self._fetch_log, 'w')
10345                                 except EnvironmentError:
10346                                         pass
10347
10348                 self._running_portage = None
10349                 portage_match = self._running_root.trees["vartree"].dbapi.match(
10350                         portage.const.PORTAGE_PACKAGE_ATOM)
10351                 if portage_match:
10352                         cpv = portage_match.pop()
10353                         self._running_portage = self._pkg(cpv, "installed",
10354                                 self._running_root, installed=True)
10355
10356         def _poll(self, timeout=None):
10357                 self._schedule()
10358                 PollScheduler._poll(self, timeout=timeout)
10359
10360         def _set_max_jobs(self, max_jobs):
10361                 self._max_jobs = max_jobs
10362                 self._task_queues.jobs.max_jobs = max_jobs
10363
10364         def _background_mode(self):
10365                 """
10366                 Check if background mode is enabled and adjust states as necessary.
10367
10368                 @rtype: bool
10369                 @returns: True if background mode is enabled, False otherwise.
10370                 """
10371                 background = (self._max_jobs is True or \
10372                         self._max_jobs > 1 or "--quiet" in self.myopts) and \
10373                         not bool(self._opts_no_background.intersection(self.myopts))
10374
10375                 if background:
10376                         interactive_tasks = self._get_interactive_tasks()
10377                         if interactive_tasks:
10378                                 background = False
10379                                 writemsg_level(">>> Sending package output to stdio due " + \
10380                                         "to interactive package(s):\n",
10381                                         level=logging.INFO, noiselevel=-1)
10382                                 msg = [""]
10383                                 for pkg in interactive_tasks:
10384                                         pkg_str = "  " + colorize("INFORM", str(pkg.cpv))
10385                                         if pkg.root != "/":
10386                                                 pkg_str += " for " + pkg.root
10387                                         msg.append(pkg_str)
10388                                 msg.append("")
10389                                 writemsg_level("".join("%s\n" % (l,) for l in msg),
10390                                         level=logging.INFO, noiselevel=-1)
10391                                 if self._max_jobs is True or self._max_jobs > 1:
10392                                         self._set_max_jobs(1)
10393                                         writemsg_level(">>> Setting --jobs=1 due " + \
10394                                                 "to the above interactive package(s)\n",
10395                                                 level=logging.INFO, noiselevel=-1)
10396
10397                 self._status_display.quiet = \
10398                         not background or \
10399                         ("--quiet" in self.myopts and \
10400                         "--verbose" not in self.myopts)
10401
10402                 self._logger.xterm_titles = \
10403                         "notitles" not in self.settings.features and \
10404                         self._status_display.quiet
10405
10406                 return background
10407
10408         def _get_interactive_tasks(self):
10409                 from portage import flatten
10410                 from portage.dep import use_reduce, paren_reduce
10411                 interactive_tasks = []
10412                 for task in self._mergelist:
10413                         if not (isinstance(task, Package) and \
10414                                 task.operation == "merge"):
10415                                 continue
10416                         try:
10417                                 properties = flatten(use_reduce(paren_reduce(
10418                                         task.metadata["PROPERTIES"]), uselist=task.use.enabled))
10419                         except portage.exception.InvalidDependString, e:
10420                                 show_invalid_depstring_notice(task,
10421                                         task.metadata["PROPERTIES"], str(e))
10422                                 raise self._unknown_internal_error()
10423                         if "interactive" in properties:
10424                                 interactive_tasks.append(task)
10425                 return interactive_tasks
10426
10427         def _set_digraph(self, digraph):
10428                 if "--nodeps" in self.myopts or \
10429                         (self._max_jobs is not True and self._max_jobs < 2):
10430                         # save some memory
10431                         self._digraph = None
10432                         return
10433
10434                 self._digraph = digraph
10435                 self._find_system_deps()
10436                 self._prune_digraph()
10437                 self._prevent_builddir_collisions()
10438
10439         def _find_system_deps(self):
10440                 """
10441                 Find system packages and their deep runtime dependencies. Before being
10442                 merged, these packages go to merge_wait_queue, to be merged when no
10443                 other packages are building.
10444                 """
10445                 deep_system_deps = self._deep_system_deps
10446                 deep_system_deps.clear()
10447                 deep_system_deps.update(
10448                         _find_deep_system_runtime_deps(self._digraph))
10449                 deep_system_deps.difference_update([pkg for pkg in \
10450                         deep_system_deps if pkg.operation != "merge"])
10451
10452         def _prune_digraph(self):
10453                 """
10454                 Prune any root nodes that are irrelevant.
10455                 """
10456
10457                 graph = self._digraph
10458                 completed_tasks = self._completed_tasks
10459                 removed_nodes = set()
10460                 while True:
10461                         for node in graph.root_nodes():
10462                                 if not isinstance(node, Package) or \
10463                                         (node.installed and node.operation == "nomerge") or \
10464                                         node.onlydeps or \
10465                                         node in completed_tasks:
10466                                         removed_nodes.add(node)
10467                         if removed_nodes:
10468                                 graph.difference_update(removed_nodes)
10469                         if not removed_nodes:
10470                                 break
10471                         removed_nodes.clear()
10472
10473         def _prevent_builddir_collisions(self):
10474                 """
10475                 When building stages, sometimes the same exact cpv needs to be merged
10476                 to both $ROOTs. Add edges to the digraph in order to avoid collisions
10477                 in the builddir. Currently, normal file locks would be inappropriate
10478                 for this purpose since emerge holds all of it's build dir locks from
10479                 the main process.
10480                 """
10481                 cpv_map = {}
10482                 for pkg in self._mergelist:
10483                         if not isinstance(pkg, Package):
10484                                 # a satisfied blocker
10485                                 continue
10486                         if pkg.installed:
10487                                 continue
10488                         if pkg.cpv not in cpv_map:
10489                                 cpv_map[pkg.cpv] = [pkg]
10490                                 continue
10491                         for earlier_pkg in cpv_map[pkg.cpv]:
10492                                 self._digraph.add(earlier_pkg, pkg,
10493                                         priority=DepPriority(buildtime=True))
10494                         cpv_map[pkg.cpv].append(pkg)
10495
10496         class _pkg_failure(portage.exception.PortageException):
10497                 """
10498                 An instance of this class is raised by unmerge() when
10499                 an uninstallation fails.
10500                 """
10501                 status = 1
10502                 def __init__(self, *pargs):
10503                         portage.exception.PortageException.__init__(self, pargs)
10504                         if pargs:
10505                                 self.status = pargs[0]
10506
10507         def _schedule_fetch(self, fetcher):
10508                 """
10509                 Schedule a fetcher on the fetch queue, in order to
10510                 serialize access to the fetch log.
10511                 """
10512                 self._task_queues.fetch.addFront(fetcher)
10513
10514         def _schedule_setup(self, setup_phase):
10515                 """
10516                 Schedule a setup phase on the merge queue, in order to
10517                 serialize unsandboxed access to the live filesystem.
10518                 """
10519                 self._task_queues.merge.addFront(setup_phase)
10520                 self._schedule()
10521
10522         def _schedule_unpack(self, unpack_phase):
10523                 """
10524                 Schedule an unpack phase on the unpack queue, in order
10525                 to serialize $DISTDIR access for live ebuilds.
10526                 """
10527                 self._task_queues.unpack.add(unpack_phase)
10528
10529         def _find_blockers(self, new_pkg):
10530                 """
10531                 Returns a callable which should be called only when
10532                 the vdb lock has been acquired.
10533                 """
10534                 def get_blockers():
10535                         return self._find_blockers_with_lock(new_pkg, acquire_lock=0)
10536                 return get_blockers
10537
10538         def _find_blockers_with_lock(self, new_pkg, acquire_lock=0):
10539                 if self._opts_ignore_blockers.intersection(self.myopts):
10540                         return None
10541
10542                 # Call gc.collect() here to avoid heap overflow that
10543                 # triggers 'Cannot allocate memory' errors (reported
10544                 # with python-2.5).
10545                 import gc
10546                 gc.collect()
10547
10548                 blocker_db = self._blocker_db[new_pkg.root]
10549
10550                 blocker_dblinks = []
10551                 for blocking_pkg in blocker_db.findInstalledBlockers(
10552                         new_pkg, acquire_lock=acquire_lock):
10553                         if new_pkg.slot_atom == blocking_pkg.slot_atom:
10554                                 continue
10555                         if new_pkg.cpv == blocking_pkg.cpv:
10556                                 continue
10557                         blocker_dblinks.append(portage.dblink(
10558                                 blocking_pkg.category, blocking_pkg.pf, blocking_pkg.root,
10559                                 self.pkgsettings[blocking_pkg.root], treetype="vartree",
10560                                 vartree=self.trees[blocking_pkg.root]["vartree"]))
10561
10562                 gc.collect()
10563
10564                 return blocker_dblinks
10565
10566         def _dblink_pkg(self, pkg_dblink):
10567                 cpv = pkg_dblink.mycpv
10568                 type_name = RootConfig.tree_pkg_map[pkg_dblink.treetype]
10569                 root_config = self.trees[pkg_dblink.myroot]["root_config"]
10570                 installed = type_name == "installed"
10571                 return self._pkg(cpv, type_name, root_config, installed=installed)
10572
10573         def _append_to_log_path(self, log_path, msg):
10574                 f = open(log_path, 'a')
10575                 try:
10576                         f.write(msg)
10577                 finally:
10578                         f.close()
10579
10580         def _dblink_elog(self, pkg_dblink, phase, func, msgs):
10581
10582                 log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
10583                 log_file = None
10584                 out = sys.stdout
10585                 background = self._background
10586
10587                 if background and log_path is not None:
10588                         log_file = open(log_path, 'a')
10589                         out = log_file
10590
10591                 try:
10592                         for msg in msgs:
10593                                 func(msg, phase=phase, key=pkg_dblink.mycpv, out=out)
10594                 finally:
10595                         if log_file is not None:
10596                                 log_file.close()
10597
10598         def _dblink_emerge_log(self, msg):
10599                 self._logger.log(msg)
10600
10601         def _dblink_display_merge(self, pkg_dblink, msg, level=0, noiselevel=0):
10602                 log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
10603                 background = self._background
10604
10605                 if log_path is None:
10606                         if not (background and level < logging.WARN):
10607                                 portage.util.writemsg_level(msg,
10608                                         level=level, noiselevel=noiselevel)
10609                 else:
10610                         if not background:
10611                                 portage.util.writemsg_level(msg,
10612                                         level=level, noiselevel=noiselevel)
10613                         self._append_to_log_path(log_path, msg)
10614
10615         def _dblink_ebuild_phase(self,
10616                 pkg_dblink, pkg_dbapi, ebuild_path, phase):
10617                 """
10618                 Using this callback for merge phases allows the scheduler
10619                 to run while these phases execute asynchronously, and allows
10620                 the scheduler control output handling.
10621                 """
10622
10623                 scheduler = self._sched_iface
10624                 settings = pkg_dblink.settings
10625                 pkg = self._dblink_pkg(pkg_dblink)
10626                 background = self._background
10627                 log_path = settings.get("PORTAGE_LOG_FILE")
10628
10629                 ebuild_phase = EbuildPhase(background=background,
10630                         pkg=pkg, phase=phase, scheduler=scheduler,
10631                         settings=settings, tree=pkg_dblink.treetype)
10632                 ebuild_phase.start()
10633                 ebuild_phase.wait()
10634
10635                 return ebuild_phase.returncode
10636
10637         def _generate_digests(self):
10638                 """
10639                 Generate digests if necessary for --digests or FEATURES=digest.
10640                 In order to avoid interference, this must done before parallel
10641                 tasks are started.
10642                 """
10643
10644                 if '--fetchonly' in self.myopts:
10645                         return os.EX_OK
10646
10647                 digest = '--digest' in self.myopts
10648                 if not digest:
10649                         for pkgsettings in self.pkgsettings.itervalues():
10650                                 if 'digest' in pkgsettings.features:
10651                                         digest = True
10652                                         break
10653
10654                 if not digest:
10655                         return os.EX_OK
10656
10657                 for x in self._mergelist:
10658                         if not isinstance(x, Package) or \
10659                                 x.type_name != 'ebuild' or \
10660                                 x.operation != 'merge':
10661                                 continue
10662                         pkgsettings = self.pkgsettings[x.root]
10663                         if '--digest' not in self.myopts and \
10664                                 'digest' not in pkgsettings.features:
10665                                 continue
10666                         portdb = x.root_config.trees['porttree'].dbapi
10667                         ebuild_path = portdb.findname(x.cpv)
10668                         if not ebuild_path:
10669                                 writemsg_level(
10670                                         "!!! Could not locate ebuild for '%s'.\n" \
10671                                         % x.cpv, level=logging.ERROR, noiselevel=-1)
10672                                 return 1
10673                         pkgsettings['O'] = os.path.dirname(ebuild_path)
10674                         if not portage.digestgen([], pkgsettings, myportdb=portdb):
10675                                 writemsg_level(
10676                                         "!!! Unable to generate manifest for '%s'.\n" \
10677                                         % x.cpv, level=logging.ERROR, noiselevel=-1)
10678                                 return 1
10679
10680                 return os.EX_OK
10681
10682         def _check_manifests(self):
10683                 # Verify all the manifests now so that the user is notified of failure
10684                 # as soon as possible.
10685                 if "strict" not in self.settings.features or \
10686                         "--fetchonly" in self.myopts or \
10687                         "--fetch-all-uri" in self.myopts:
10688                         return os.EX_OK
10689
10690                 shown_verifying_msg = False
10691                 quiet_settings = {}
10692                 for myroot, pkgsettings in self.pkgsettings.iteritems():
10693                         quiet_config = portage.config(clone=pkgsettings)
10694                         quiet_config["PORTAGE_QUIET"] = "1"
10695                         quiet_config.backup_changes("PORTAGE_QUIET")
10696                         quiet_settings[myroot] = quiet_config
10697                         del quiet_config
10698
10699                 for x in self._mergelist:
10700                         if not isinstance(x, Package) or \
10701                                 x.type_name != "ebuild":
10702                                 continue
10703
10704                         if not shown_verifying_msg:
10705                                 shown_verifying_msg = True
10706                                 self._status_msg("Verifying ebuild manifests")
10707
10708                         root_config = x.root_config
10709                         portdb = root_config.trees["porttree"].dbapi
10710                         quiet_config = quiet_settings[root_config.root]
10711                         quiet_config["O"] = os.path.dirname(portdb.findname(x.cpv))
10712                         if not portage.digestcheck([], quiet_config, strict=True):
10713                                 return 1
10714
10715                 return os.EX_OK
10716
10717         def _add_prefetchers(self):
10718
10719                 if not self._parallel_fetch:
10720                         return
10721
10722                 if self._parallel_fetch:
10723                         self._status_msg("Starting parallel fetch")
10724
10725                         prefetchers = self._prefetchers
10726                         getbinpkg = "--getbinpkg" in self.myopts
10727
10728                         # In order to avoid "waiting for lock" messages
10729                         # at the beginning, which annoy users, never
10730                         # spawn a prefetcher for the first package.
10731                         for pkg in self._mergelist[1:]:
10732                                 prefetcher = self._create_prefetcher(pkg)
10733                                 if prefetcher is not None:
10734                                         self._task_queues.fetch.add(prefetcher)
10735                                         prefetchers[pkg] = prefetcher
10736
10737         def _create_prefetcher(self, pkg):
10738                 """
10739                 @return: a prefetcher, or None if not applicable
10740                 """
10741                 prefetcher = None
10742
10743                 if not isinstance(pkg, Package):
10744                         pass
10745
10746                 elif pkg.type_name == "ebuild":
10747
10748                         prefetcher = EbuildFetcher(background=True,
10749                                 config_pool=self._ConfigPool(pkg.root,
10750                                 self._allocate_config, self._deallocate_config),
10751                                 fetchonly=1, logfile=self._fetch_log,
10752                                 pkg=pkg, prefetch=True, scheduler=self._sched_iface)
10753
10754                 elif pkg.type_name == "binary" and \
10755                         "--getbinpkg" in self.myopts and \
10756                         pkg.root_config.trees["bintree"].isremote(pkg.cpv):
10757
10758                         prefetcher = BinpkgPrefetcher(background=True,
10759                                 pkg=pkg, scheduler=self._sched_iface)
10760
10761                 return prefetcher
10762
10763         def _is_restart_scheduled(self):
10764                 """
10765                 Check if the merge list contains a replacement
10766                 for the current running instance, that will result
10767                 in restart after merge.
10768                 @rtype: bool
10769                 @returns: True if a restart is scheduled, False otherwise.
10770                 """
10771                 if self._opts_no_restart.intersection(self.myopts):
10772                         return False
10773
10774                 mergelist = self._mergelist
10775
10776                 for i, pkg in enumerate(mergelist):
10777                         if self._is_restart_necessary(pkg) and \
10778                                 i != len(mergelist) - 1:
10779                                 return True
10780
10781                 return False
10782
10783         def _is_restart_necessary(self, pkg):
10784                 """
10785                 @return: True if merging the given package
10786                         requires restart, False otherwise.
10787                 """
10788
10789                 # Figure out if we need a restart.
10790                 if pkg.root == self._running_root.root and \
10791                         portage.match_from_list(
10792                         portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
10793                         if self._running_portage:
10794                                 return pkg.cpv != self._running_portage.cpv
10795                         return True
10796                 return False
10797
10798         def _restart_if_necessary(self, pkg):
10799                 """
10800                 Use execv() to restart emerge. This happens
10801                 if portage upgrades itself and there are
10802                 remaining packages in the list.
10803                 """
10804
10805                 if self._opts_no_restart.intersection(self.myopts):
10806                         return
10807
10808                 if not self._is_restart_necessary(pkg):
10809                         return
10810
10811                 if pkg == self._mergelist[-1]:
10812                         return
10813
10814                 self._main_loop_cleanup()
10815
10816                 logger = self._logger
10817                 pkg_count = self._pkg_count
10818                 mtimedb = self._mtimedb
10819                 bad_resume_opts = self._bad_resume_opts
10820
10821                 logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
10822                         (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
10823
10824                 logger.log(" *** RESTARTING " + \
10825                         "emerge via exec() after change of " + \
10826                         "portage version.")
10827
10828                 mtimedb["resume"]["mergelist"].remove(list(pkg))
10829                 mtimedb.commit()
10830                 portage.run_exitfuncs()
10831                 mynewargv = [sys.argv[0], "--resume"]
10832                 resume_opts = self.myopts.copy()
10833                 # For automatic resume, we need to prevent
10834                 # any of bad_resume_opts from leaking in
10835                 # via EMERGE_DEFAULT_OPTS.
10836                 resume_opts["--ignore-default-opts"] = True
10837                 for myopt, myarg in resume_opts.iteritems():
10838                         if myopt not in bad_resume_opts:
10839                                 if myarg is True:
10840                                         mynewargv.append(myopt)
10841                                 else:
10842                                         mynewargv.append(myopt +"="+ str(myarg))
10843                 # priority only needs to be adjusted on the first run
10844                 os.environ["PORTAGE_NICENESS"] = "0"
10845                 os.execv(mynewargv[0], mynewargv)
10846
10847         def merge(self):
10848
10849                 if "--resume" in self.myopts:
10850                         # We're resuming.
10851                         portage.writemsg_stdout(
10852                                 colorize("GOOD", "*** Resuming merge...\n"), noiselevel=-1)
10853                         self._logger.log(" *** Resuming merge...")
10854
10855                 self._save_resume_list()
10856
10857                 try:
10858                         self._background = self._background_mode()
10859                 except self._unknown_internal_error:
10860                         return 1
10861
10862                 for root in self.trees:
10863                         root_config = self.trees[root]["root_config"]
10864
10865                         # Even for --pretend --fetch mode, PORTAGE_TMPDIR is required
10866                         # since it might spawn pkg_nofetch which requires PORTAGE_BUILDDIR
10867                         # for ensuring sane $PWD (bug #239560) and storing elog messages.
10868                         tmpdir = root_config.settings.get("PORTAGE_TMPDIR", "")
10869                         if not tmpdir or not os.path.isdir(tmpdir):
10870                                 msg = "The directory specified in your " + \
10871                                         "PORTAGE_TMPDIR variable, '%s', " % tmpdir + \
10872                                 "does not exist. Please create this " + \
10873                                 "directory or correct your PORTAGE_TMPDIR setting."
10874                                 msg = textwrap.wrap(msg, 70)
10875                                 out = portage.output.EOutput()
10876                                 for l in msg:
10877                                         out.eerror(l)
10878                                 return 1
10879
10880                         if self._background:
10881                                 root_config.settings.unlock()
10882                                 root_config.settings["PORTAGE_BACKGROUND"] = "1"
10883                                 root_config.settings.backup_changes("PORTAGE_BACKGROUND")
10884                                 root_config.settings.lock()
10885
10886                         self.pkgsettings[root] = portage.config(
10887                                 clone=root_config.settings)
10888
10889                 rval = self._generate_digests()
10890                 if rval != os.EX_OK:
10891                         return rval
10892
10893                 rval = self._check_manifests()
10894                 if rval != os.EX_OK:
10895                         return rval
10896
10897                 keep_going = "--keep-going" in self.myopts
10898                 fetchonly = self._build_opts.fetchonly
10899                 mtimedb = self._mtimedb
10900                 failed_pkgs = self._failed_pkgs
10901
10902                 while True:
10903                         rval = self._merge()
10904                         if rval == os.EX_OK or fetchonly or not keep_going:
10905                                 break
10906                         if "resume" not in mtimedb:
10907                                 break
10908                         mergelist = self._mtimedb["resume"].get("mergelist")
10909                         if not mergelist:
10910                                 break
10911
10912                         if not failed_pkgs:
10913                                 break
10914
10915                         for failed_pkg in failed_pkgs:
10916                                 mergelist.remove(list(failed_pkg.pkg))
10917
10918                         self._failed_pkgs_all.extend(failed_pkgs)
10919                         del failed_pkgs[:]
10920
10921                         if not mergelist:
10922                                 break
10923
10924                         if not self._calc_resume_list():
10925                                 break
10926
10927                         clear_caches(self.trees)
10928                         if not self._mergelist:
10929                                 break
10930
10931                         self._save_resume_list()
10932                         self._pkg_count.curval = 0
10933                         self._pkg_count.maxval = len([x for x in self._mergelist \
10934                                 if isinstance(x, Package) and x.operation == "merge"])
10935                         self._status_display.maxval = self._pkg_count.maxval
10936
10937                 self._logger.log(" *** Finished. Cleaning up...")
10938
10939                 if failed_pkgs:
10940                         self._failed_pkgs_all.extend(failed_pkgs)
10941                         del failed_pkgs[:]
10942
10943                 background = self._background
10944                 failure_log_shown = False
10945                 if background and len(self._failed_pkgs_all) == 1:
10946                         # If only one package failed then just show it's
10947                         # whole log for easy viewing.
10948                         failed_pkg = self._failed_pkgs_all[-1]
10949                         build_dir = failed_pkg.build_dir
10950                         log_file = None
10951
10952                         log_paths = [failed_pkg.build_log]
10953
10954                         log_path = self._locate_failure_log(failed_pkg)
10955                         if log_path is not None:
10956                                 try:
10957                                         log_file = open(log_path)
10958                                 except IOError:
10959                                         pass
10960
10961                         if log_file is not None:
10962                                 try:
10963                                         for line in log_file:
10964                                                 writemsg_level(line, noiselevel=-1)
10965                                 finally:
10966                                         log_file.close()
10967                                 failure_log_shown = True
10968
10969                 # Dump mod_echo output now since it tends to flood the terminal.
10970                 # This allows us to avoid having more important output, generated
10971                 # later, from being swept away by the mod_echo output.
10972                 mod_echo_output =  _flush_elog_mod_echo()
10973
10974                 if background and not failure_log_shown and \
10975                         self._failed_pkgs_all and \
10976                         self._failed_pkgs_die_msgs and \
10977                         not mod_echo_output:
10978
10979                         printer = portage.output.EOutput()
10980                         for mysettings, key, logentries in self._failed_pkgs_die_msgs:
10981                                 root_msg = ""
10982                                 if mysettings["ROOT"] != "/":
10983                                         root_msg = " merged to %s" % mysettings["ROOT"]
10984                                 print
10985                                 printer.einfo("Error messages for package %s%s:" % \
10986                                         (colorize("INFORM", key), root_msg))
10987                                 print
10988                                 for phase in portage.const.EBUILD_PHASES:
10989                                         if phase not in logentries:
10990                                                 continue
10991                                         for msgtype, msgcontent in logentries[phase]:
10992                                                 if isinstance(msgcontent, basestring):
10993                                                         msgcontent = [msgcontent]
10994                                                 for line in msgcontent:
10995                                                         printer.eerror(line.strip("\n"))
10996
10997                 if self._post_mod_echo_msgs:
10998                         for msg in self._post_mod_echo_msgs:
10999                                 msg()
11000
11001                 if len(self._failed_pkgs_all) > 1 or \
11002                         (self._failed_pkgs_all and "--keep-going" in self.myopts):
11003                         if len(self._failed_pkgs_all) > 1:
11004                                 msg = "The following %d packages have " % \
11005                                         len(self._failed_pkgs_all) + \
11006                                         "failed to build or install:"
11007                         else:
11008                                 msg = "The following package has " + \
11009                                         "failed to build or install:"
11010                         prefix = bad(" * ")
11011                         writemsg(prefix + "\n", noiselevel=-1)
11012                         from textwrap import wrap
11013                         for line in wrap(msg, 72):
11014                                 writemsg("%s%s\n" % (prefix, line), noiselevel=-1)
11015                         writemsg(prefix + "\n", noiselevel=-1)
11016                         for failed_pkg in self._failed_pkgs_all:
11017                                 writemsg("%s\t%s\n" % (prefix,
11018                                         colorize("INFORM", str(failed_pkg.pkg))),
11019                                         noiselevel=-1)
11020                         writemsg(prefix + "\n", noiselevel=-1)
11021
11022                 return rval
11023
11024         def _elog_listener(self, mysettings, key, logentries, fulltext):
11025                 errors = portage.elog.filter_loglevels(logentries, ["ERROR"])
11026                 if errors:
11027                         self._failed_pkgs_die_msgs.append(
11028                                 (mysettings, key, errors))
11029
11030         def _locate_failure_log(self, failed_pkg):
11031
11032                 build_dir = failed_pkg.build_dir
11033                 log_file = None
11034
11035                 log_paths = [failed_pkg.build_log]
11036
11037                 for log_path in log_paths:
11038                         if not log_path:
11039                                 continue
11040
11041                         try:
11042                                 log_size = os.stat(log_path).st_size
11043                         except OSError:
11044                                 continue
11045
11046                         if log_size == 0:
11047                                 continue
11048
11049                         return log_path
11050
11051                 return None
11052
11053         def _add_packages(self):
11054                 pkg_queue = self._pkg_queue
11055                 for pkg in self._mergelist:
11056                         if isinstance(pkg, Package):
11057                                 pkg_queue.append(pkg)
11058                         elif isinstance(pkg, Blocker):
11059                                 pass
11060
11061         def _system_merge_started(self, merge):
11062                 """
11063                 Add any unsatisfied runtime deps to self._unsatisfied_system_deps.
11064                 """
11065                 graph = self._digraph
11066                 if graph is None:
11067                         return
11068                 pkg = merge.merge.pkg
11069
11070                 # Skip this if $ROOT != / since it shouldn't matter if there
11071                 # are unsatisfied system runtime deps in this case.
11072                 if pkg.root != '/':
11073                         return
11074
11075                 completed_tasks = self._completed_tasks
11076                 unsatisfied = self._unsatisfied_system_deps
11077
11078                 def ignore_non_runtime_or_satisfied(priority):
11079                         """
11080                         Ignore non-runtime and satisfied runtime priorities.
11081                         """
11082                         if isinstance(priority, DepPriority) and \
11083                                 not priority.satisfied and \
11084                                 (priority.runtime or priority.runtime_post):
11085                                 return False
11086                         return True
11087
11088                 # When checking for unsatisfied runtime deps, only check
11089                 # direct deps since indirect deps are checked when the
11090                 # corresponding parent is merged.
11091                 for child in graph.child_nodes(pkg,
11092                         ignore_priority=ignore_non_runtime_or_satisfied):
11093                         if not isinstance(child, Package) or \
11094                                 child.operation == 'uninstall':
11095                                 continue
11096                         if child is pkg:
11097                                 continue
11098                         if child.operation == 'merge' and \
11099                                 child not in completed_tasks:
11100                                 unsatisfied.add(child)
11101
11102         def _merge_wait_exit_handler(self, task):
11103                 self._merge_wait_scheduled.remove(task)
11104                 self._merge_exit(task)
11105
11106         def _merge_exit(self, merge):
11107                 self._do_merge_exit(merge)
11108                 self._deallocate_config(merge.merge.settings)
11109                 if merge.returncode == os.EX_OK and \
11110                         not merge.merge.pkg.installed:
11111                         self._status_display.curval += 1
11112                 self._status_display.merges = len(self._task_queues.merge)
11113                 self._schedule()
11114
11115         def _do_merge_exit(self, merge):
11116                 pkg = merge.merge.pkg
11117                 if merge.returncode != os.EX_OK:
11118                         settings = merge.merge.settings
11119                         build_dir = settings.get("PORTAGE_BUILDDIR")
11120                         build_log = settings.get("PORTAGE_LOG_FILE")
11121
11122                         self._failed_pkgs.append(self._failed_pkg(
11123                                 build_dir=build_dir, build_log=build_log,
11124                                 pkg=pkg,
11125                                 returncode=merge.returncode))
11126                         self._failed_pkg_msg(self._failed_pkgs[-1], "install", "to")
11127
11128                         self._status_display.failed = len(self._failed_pkgs)
11129                         return
11130
11131                 self._task_complete(pkg)
11132                 pkg_to_replace = merge.merge.pkg_to_replace
11133                 if pkg_to_replace is not None:
11134                         # When a package is replaced, mark it's uninstall
11135                         # task complete (if any).
11136                         uninst_hash_key = \
11137                                 ("installed", pkg.root, pkg_to_replace.cpv, "uninstall")
11138                         self._task_complete(uninst_hash_key)
11139
11140                 if pkg.installed:
11141                         return
11142
11143                 self._restart_if_necessary(pkg)
11144
11145                 # Call mtimedb.commit() after each merge so that
11146                 # --resume still works after being interrupted
11147                 # by reboot, sigkill or similar.
11148                 mtimedb = self._mtimedb
11149                 mtimedb["resume"]["mergelist"].remove(list(pkg))
11150                 if not mtimedb["resume"]["mergelist"]:
11151                         del mtimedb["resume"]
11152                 mtimedb.commit()
11153
11154         def _build_exit(self, build):
11155                 if build.returncode == os.EX_OK:
11156                         self.curval += 1
11157                         merge = PackageMerge(merge=build)
11158                         if not build.build_opts.buildpkgonly and \
11159                                 build.pkg in self._deep_system_deps:
11160                                 # Since dependencies on system packages are frequently
11161                                 # unspecified, merge them only when no builds are executing.
11162                                 self._merge_wait_queue.append(merge)
11163                                 merge.addStartListener(self._system_merge_started)
11164                         else:
11165                                 merge.addExitListener(self._merge_exit)
11166                                 self._task_queues.merge.add(merge)
11167                                 self._status_display.merges = len(self._task_queues.merge)
11168                 else:
11169                         settings = build.settings
11170                         build_dir = settings.get("PORTAGE_BUILDDIR")
11171                         build_log = settings.get("PORTAGE_LOG_FILE")
11172
11173                         self._failed_pkgs.append(self._failed_pkg(
11174                                 build_dir=build_dir, build_log=build_log,
11175                                 pkg=build.pkg,
11176                                 returncode=build.returncode))
11177                         self._failed_pkg_msg(self._failed_pkgs[-1], "emerge", "for")
11178
11179                         self._status_display.failed = len(self._failed_pkgs)
11180                         self._deallocate_config(build.settings)
11181                 self._jobs -= 1
11182                 self._status_display.running = self._jobs
11183                 self._schedule()
11184
11185         def _extract_exit(self, build):
11186                 self._build_exit(build)
11187
11188         def _task_complete(self, pkg):
11189                 self._completed_tasks.add(pkg)
11190                 self._unsatisfied_system_deps.discard(pkg)
11191                 self._choose_pkg_return_early = False
11192
11193         def _merge(self):
11194
11195                 self._add_prefetchers()
11196                 self._add_packages()
11197                 pkg_queue = self._pkg_queue
11198                 failed_pkgs = self._failed_pkgs
11199                 portage.locks._quiet = self._background
11200                 portage.elog._emerge_elog_listener = self._elog_listener
11201                 rval = os.EX_OK
11202
11203                 try:
11204                         self._main_loop()
11205                 finally:
11206                         self._main_loop_cleanup()
11207                         portage.locks._quiet = False
11208                         portage.elog._emerge_elog_listener = None
11209                         if failed_pkgs:
11210                                 rval = failed_pkgs[-1].returncode
11211
11212                 return rval
11213
11214         def _main_loop_cleanup(self):
11215                 del self._pkg_queue[:]
11216                 self._completed_tasks.clear()
11217                 self._deep_system_deps.clear()
11218                 self._unsatisfied_system_deps.clear()
11219                 self._choose_pkg_return_early = False
11220                 self._status_display.reset()
11221                 self._digraph = None
11222                 self._task_queues.fetch.clear()
11223
11224         def _choose_pkg(self):
11225                 """
11226                 Choose a task that has all it's dependencies satisfied.
11227                 """
11228
11229                 if self._choose_pkg_return_early:
11230                         return None
11231
11232                 if self._digraph is None:
11233                         if (self._jobs or self._task_queues.merge) and \
11234                                 not ("--nodeps" in self.myopts and \
11235                                 (self._max_jobs is True or self._max_jobs > 1)):
11236                                 self._choose_pkg_return_early = True
11237                                 return None
11238                         return self._pkg_queue.pop(0)
11239
11240                 if not (self._jobs or self._task_queues.merge):
11241                         return self._pkg_queue.pop(0)
11242
11243                 self._prune_digraph()
11244
11245                 chosen_pkg = None
11246                 later = set(self._pkg_queue)
11247                 for pkg in self._pkg_queue:
11248                         later.remove(pkg)
11249                         if not self._dependent_on_scheduled_merges(pkg, later):
11250                                 chosen_pkg = pkg
11251                                 break
11252
11253                 if chosen_pkg is not None:
11254                         self._pkg_queue.remove(chosen_pkg)
11255
11256                 if chosen_pkg is None:
11257                         # There's no point in searching for a package to
11258                         # choose until at least one of the existing jobs
11259                         # completes.
11260                         self._choose_pkg_return_early = True
11261
11262                 return chosen_pkg
11263
11264         def _dependent_on_scheduled_merges(self, pkg, later):
11265                 """
11266                 Traverse the subgraph of the given packages deep dependencies
11267                 to see if it contains any scheduled merges.
11268                 @param pkg: a package to check dependencies for
11269                 @type pkg: Package
11270                 @param later: packages for which dependence should be ignored
11271                         since they will be merged later than pkg anyway and therefore
11272                         delaying the merge of pkg will not result in a more optimal
11273                         merge order
11274                 @type later: set
11275                 @rtype: bool
11276                 @returns: True if the package is dependent, False otherwise.
11277                 """
11278
11279                 graph = self._digraph
11280                 completed_tasks = self._completed_tasks
11281
11282                 dependent = False
11283                 traversed_nodes = set([pkg])
11284                 direct_deps = graph.child_nodes(pkg)
11285                 node_stack = direct_deps
11286                 direct_deps = frozenset(direct_deps)
11287                 while node_stack:
11288                         node = node_stack.pop()
11289                         if node in traversed_nodes:
11290                                 continue
11291                         traversed_nodes.add(node)
11292                         if not ((node.installed and node.operation == "nomerge") or \
11293                                 (node.operation == "uninstall" and \
11294                                 node not in direct_deps) or \
11295                                 node in completed_tasks or \
11296                                 node in later):
11297                                 dependent = True
11298                                 break
11299                         node_stack.extend(graph.child_nodes(node))
11300
11301                 return dependent
11302
11303         def _allocate_config(self, root):
11304                 """
11305                 Allocate a unique config instance for a task in order
11306                 to prevent interference between parallel tasks.
11307                 """
11308                 if self._config_pool[root]:
11309                         temp_settings = self._config_pool[root].pop()
11310                 else:
11311                         temp_settings = portage.config(clone=self.pkgsettings[root])
11312                 # Since config.setcpv() isn't guaranteed to call config.reset() due to
11313                 # performance reasons, call it here to make sure all settings from the
11314                 # previous package get flushed out (such as PORTAGE_LOG_FILE).
11315                 temp_settings.reload()
11316                 temp_settings.reset()
11317                 return temp_settings
11318
11319         def _deallocate_config(self, settings):
11320                 self._config_pool[settings["ROOT"]].append(settings)
11321
11322         def _main_loop(self):
11323
11324                 # Only allow 1 job max if a restart is scheduled
11325                 # due to portage update.
11326                 if self._is_restart_scheduled() or \
11327                         self._opts_no_background.intersection(self.myopts):
11328                         self._set_max_jobs(1)
11329
11330                 merge_queue = self._task_queues.merge
11331
11332                 while self._schedule():
11333                         if self._poll_event_handlers:
11334                                 self._poll_loop()
11335
11336                 while True:
11337                         self._schedule()
11338                         if not (self._jobs or merge_queue):
11339                                 break
11340                         if self._poll_event_handlers:
11341                                 self._poll_loop()
11342
11343         def _keep_scheduling(self):
11344                 return bool(self._pkg_queue and \
11345                         not (self._failed_pkgs and not self._build_opts.fetchonly))
11346
11347         def _schedule_tasks(self):
11348
11349                 # When the number of jobs drops to zero, process all waiting merges.
11350                 if not self._jobs and self._merge_wait_queue:
11351                         for task in self._merge_wait_queue:
11352                                 task.addExitListener(self._merge_wait_exit_handler)
11353                                 self._task_queues.merge.add(task)
11354                         self._status_display.merges = len(self._task_queues.merge)
11355                         self._merge_wait_scheduled.extend(self._merge_wait_queue)
11356                         del self._merge_wait_queue[:]
11357
11358                 self._schedule_tasks_imp()
11359                 self._status_display.display()
11360
11361                 state_change = 0
11362                 for q in self._task_queues.values():
11363                         if q.schedule():
11364                                 state_change += 1
11365
11366                 # Cancel prefetchers if they're the only reason
11367                 # the main poll loop is still running.
11368                 if self._failed_pkgs and not self._build_opts.fetchonly and \
11369                         not (self._jobs or self._task_queues.merge) and \
11370                         self._task_queues.fetch:
11371                         self._task_queues.fetch.clear()
11372                         state_change += 1
11373
11374                 if state_change:
11375                         self._schedule_tasks_imp()
11376                         self._status_display.display()
11377
11378                 return self._keep_scheduling()
11379
11380         def _job_delay(self):
11381                 """
11382                 @rtype: bool
11383                 @returns: True if job scheduling should be delayed, False otherwise.
11384                 """
11385
11386                 if self._jobs and self._max_load is not None:
11387
11388                         current_time = time.time()
11389
11390                         delay = self._job_delay_factor * self._jobs ** self._job_delay_exp
11391                         if delay > self._job_delay_max:
11392                                 delay = self._job_delay_max
11393                         if (current_time - self._previous_job_start_time) < delay:
11394                                 return True
11395
11396                 return False
11397
11398         def _schedule_tasks_imp(self):
11399                 """
11400                 @rtype: bool
11401                 @returns: True if state changed, False otherwise.
11402                 """
11403
11404                 state_change = 0
11405
11406                 while True:
11407
11408                         if not self._keep_scheduling():
11409                                 return bool(state_change)
11410
11411                         if self._choose_pkg_return_early or \
11412                                 self._merge_wait_scheduled or \
11413                                 (self._jobs and self._unsatisfied_system_deps) or \
11414                                 not self._can_add_job() or \
11415                                 self._job_delay():
11416                                 return bool(state_change)
11417
11418                         pkg = self._choose_pkg()
11419                         if pkg is None:
11420                                 return bool(state_change)
11421
11422                         state_change += 1
11423
11424                         if not pkg.installed:
11425                                 self._pkg_count.curval += 1
11426
11427                         task = self._task(pkg)
11428
11429                         if pkg.installed:
11430                                 merge = PackageMerge(merge=task)
11431                                 merge.addExitListener(self._merge_exit)
11432                                 self._task_queues.merge.add(merge)
11433
11434                         elif pkg.built:
11435                                 self._jobs += 1
11436                                 self._previous_job_start_time = time.time()
11437                                 self._status_display.running = self._jobs
11438                                 task.addExitListener(self._extract_exit)
11439                                 self._task_queues.jobs.add(task)
11440
11441                         else:
11442                                 self._jobs += 1
11443                                 self._previous_job_start_time = time.time()
11444                                 self._status_display.running = self._jobs
11445                                 task.addExitListener(self._build_exit)
11446                                 self._task_queues.jobs.add(task)
11447
11448                 return bool(state_change)
11449
11450         def _task(self, pkg):
11451
11452                 pkg_to_replace = None
11453                 if pkg.operation != "uninstall":
11454                         vardb = pkg.root_config.trees["vartree"].dbapi
11455                         previous_cpv = vardb.match(pkg.slot_atom)
11456                         if previous_cpv:
11457                                 previous_cpv = previous_cpv.pop()
11458                                 pkg_to_replace = self._pkg(previous_cpv,
11459                                         "installed", pkg.root_config, installed=True)
11460
11461                 task = MergeListItem(args_set=self._args_set,
11462                         background=self._background, binpkg_opts=self._binpkg_opts,
11463                         build_opts=self._build_opts,
11464                         config_pool=self._ConfigPool(pkg.root,
11465                         self._allocate_config, self._deallocate_config),
11466                         emerge_opts=self.myopts,
11467                         find_blockers=self._find_blockers(pkg), logger=self._logger,
11468                         mtimedb=self._mtimedb, pkg=pkg, pkg_count=self._pkg_count.copy(),
11469                         pkg_to_replace=pkg_to_replace,
11470                         prefetcher=self._prefetchers.get(pkg),
11471                         scheduler=self._sched_iface,
11472                         settings=self._allocate_config(pkg.root),
11473                         statusMessage=self._status_msg,
11474                         world_atom=self._world_atom)
11475
11476                 return task
11477
11478         def _failed_pkg_msg(self, failed_pkg, action, preposition):
11479                 pkg = failed_pkg.pkg
11480                 msg = "%s to %s %s" % \
11481                         (bad("Failed"), action, colorize("INFORM", pkg.cpv))
11482                 if pkg.root != "/":
11483                         msg += " %s %s" % (preposition, pkg.root)
11484
11485                 log_path = self._locate_failure_log(failed_pkg)
11486                 if log_path is not None:
11487                         msg += ", Log file:"
11488                 self._status_msg(msg)
11489
11490                 if log_path is not None:
11491                         self._status_msg(" '%s'" % (colorize("INFORM", log_path),))
11492
11493         def _status_msg(self, msg):
11494                 """
11495                 Display a brief status message (no newlines) in the status display.
11496                 This is called by tasks to provide feedback to the user. This
11497                 delegates the resposibility of generating \r and \n control characters,
11498                 to guarantee that lines are created or erased when necessary and
11499                 appropriate.
11500
11501                 @type msg: str
11502                 @param msg: a brief status message (no newlines allowed)
11503                 """
11504                 if not self._background:
11505                         writemsg_level("\n")
11506                 self._status_display.displayMessage(msg)
11507
11508         def _save_resume_list(self):
11509                 """
11510                 Do this before verifying the ebuild Manifests since it might
11511                 be possible for the user to use --resume --skipfirst get past
11512                 a non-essential package with a broken digest.
11513                 """
11514                 mtimedb = self._mtimedb
11515                 mtimedb["resume"]["mergelist"] = [list(x) \
11516                         for x in self._mergelist \
11517                         if isinstance(x, Package) and x.operation == "merge"]
11518
11519                 mtimedb.commit()
11520
11521         def _calc_resume_list(self):
11522                 """
11523                 Use the current resume list to calculate a new one,
11524                 dropping any packages with unsatisfied deps.
11525                 @rtype: bool
11526                 @returns: True if successful, False otherwise.
11527                 """
11528                 print colorize("GOOD", "*** Resuming merge...")
11529
11530                 if self._show_list():
11531                         if "--tree" in self.myopts:
11532                                 portage.writemsg_stdout("\n" + \
11533                                         darkgreen("These are the packages that " + \
11534                                         "would be merged, in reverse order:\n\n"))
11535
11536                         else:
11537                                 portage.writemsg_stdout("\n" + \
11538                                         darkgreen("These are the packages that " + \
11539                                         "would be merged, in order:\n\n"))
11540
11541                 show_spinner = "--quiet" not in self.myopts and \
11542                         "--nodeps" not in self.myopts
11543
11544                 if show_spinner:
11545                         print "Calculating dependencies  ",
11546
11547                 myparams = create_depgraph_params(self.myopts, None)
11548                 success = False
11549                 e = None
11550                 try:
11551                         success, mydepgraph, dropped_tasks = resume_depgraph(
11552                                 self.settings, self.trees, self._mtimedb, self.myopts,
11553                                 myparams, self._spinner)
11554                 except depgraph.UnsatisfiedResumeDep, exc:
11555                         # rename variable to avoid python-3.0 error:
11556                         # SyntaxError: can not delete variable 'e' referenced in nested
11557                         #              scope
11558                         e = exc
11559                         mydepgraph = e.depgraph
11560                         dropped_tasks = set()
11561
11562                 if show_spinner:
11563                         print "\b\b... done!"
11564
11565                 if e is not None:
11566                         def unsatisfied_resume_dep_msg():
11567                                 mydepgraph.display_problems()
11568                                 out = portage.output.EOutput()
11569                                 out.eerror("One or more packages are either masked or " + \
11570                                         "have missing dependencies:")
11571                                 out.eerror("")
11572                                 indent = "  "
11573                                 show_parents = set()
11574                                 for dep in e.value:
11575                                         if dep.parent in show_parents:
11576                                                 continue
11577                                         show_parents.add(dep.parent)
11578                                         if dep.atom is None:
11579                                                 out.eerror(indent + "Masked package:")
11580                                                 out.eerror(2 * indent + str(dep.parent))
11581                                                 out.eerror("")
11582                                         else:
11583                                                 out.eerror(indent + str(dep.atom) + " pulled in by:")
11584                                                 out.eerror(2 * indent + str(dep.parent))
11585                                                 out.eerror("")
11586                                 msg = "The resume list contains packages " + \
11587                                         "that are either masked or have " + \
11588                                         "unsatisfied dependencies. " + \
11589                                         "Please restart/continue " + \
11590                                         "the operation manually, or use --skipfirst " + \
11591                                         "to skip the first package in the list and " + \
11592                                         "any other packages that may be " + \
11593                                         "masked or have missing dependencies."
11594                                 for line in textwrap.wrap(msg, 72):
11595                                         out.eerror(line)
11596                         self._post_mod_echo_msgs.append(unsatisfied_resume_dep_msg)
11597                         return False
11598
11599                 if success and self._show_list():
11600                         mylist = mydepgraph.altlist()
11601                         if mylist:
11602                                 if "--tree" in self.myopts:
11603                                         mylist.reverse()
11604                                 mydepgraph.display(mylist, favorites=self._favorites)
11605
11606                 if not success:
11607                         self._post_mod_echo_msgs.append(mydepgraph.display_problems)
11608                         return False
11609                 mydepgraph.display_problems()
11610
11611                 mylist = mydepgraph.altlist()
11612                 mydepgraph.break_refs(mylist)
11613                 mydepgraph.break_refs(dropped_tasks)
11614                 self._mergelist = mylist
11615                 self._set_digraph(mydepgraph.schedulerGraph())
11616
11617                 msg_width = 75
11618                 for task in dropped_tasks:
11619                         if not (isinstance(task, Package) and task.operation == "merge"):
11620                                 continue
11621                         pkg = task
11622                         msg = "emerge --keep-going:" + \
11623                                 " %s" % (pkg.cpv,)
11624                         if pkg.root != "/":
11625                                 msg += " for %s" % (pkg.root,)
11626                         msg += " dropped due to unsatisfied dependency."
11627                         for line in textwrap.wrap(msg, msg_width):
11628                                 eerror(line, phase="other", key=pkg.cpv)
11629                         settings = self.pkgsettings[pkg.root]
11630                         # Ensure that log collection from $T is disabled inside
11631                         # elog_process(), since any logs that might exist are
11632                         # not valid here.
11633                         settings.pop("T", None)
11634                         portage.elog.elog_process(pkg.cpv, settings)
11635                         self._failed_pkgs_all.append(self._failed_pkg(pkg=pkg))
11636
11637                 return True
11638
11639         def _show_list(self):
11640                 myopts = self.myopts
11641                 if "--quiet" not in myopts and \
11642                         ("--ask" in myopts or "--tree" in myopts or \
11643                         "--verbose" in myopts):
11644                         return True
11645                 return False
11646
11647         def _world_atom(self, pkg):
11648                 """
11649                 Add the package to the world file, but only if
11650                 it's supposed to be added. Otherwise, do nothing.
11651                 """
11652
11653                 if set(("--buildpkgonly", "--fetchonly",
11654                         "--fetch-all-uri",
11655                         "--oneshot", "--onlydeps",
11656                         "--pretend")).intersection(self.myopts):
11657                         return
11658
11659                 if pkg.root != self.target_root:
11660                         return
11661
11662                 args_set = self._args_set
11663                 if not args_set.findAtomForPackage(pkg):
11664                         return
11665
11666                 logger = self._logger
11667                 pkg_count = self._pkg_count
11668                 root_config = pkg.root_config
11669                 world_set = root_config.sets["world"]
11670                 world_locked = False
11671                 if hasattr(world_set, "lock"):
11672                         world_set.lock()
11673                         world_locked = True
11674
11675                 try:
11676                         if hasattr(world_set, "load"):
11677                                 world_set.load() # maybe it's changed on disk
11678
11679                         atom = create_world_atom(pkg, args_set, root_config)
11680                         if atom:
11681                                 if hasattr(world_set, "add"):
11682                                         self._status_msg(('Recording %s in "world" ' + \
11683                                                 'favorites file...') % atom)
11684                                         logger.log(" === (%s of %s) Updating world file (%s)" % \
11685                                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv))
11686                                         world_set.add(atom)
11687                                 else:
11688                                         writemsg_level('\n!!! Unable to record %s in "world"\n' % \
11689                                                 (atom,), level=logging.WARN, noiselevel=-1)
11690                 finally:
11691                         if world_locked:
11692                                 world_set.unlock()
11693
11694         def _pkg(self, cpv, type_name, root_config, installed=False):
11695                 """
11696                 Get a package instance from the cache, or create a new
11697                 one if necessary. Raises KeyError from aux_get if it
11698                 failures for some reason (package does not exist or is
11699                 corrupt).
11700                 """
11701                 operation = "merge"
11702                 if installed:
11703                         operation = "nomerge"
11704
11705                 if self._digraph is not None:
11706                         # Reuse existing instance when available.
11707                         pkg = self._digraph.get(
11708                                 (type_name, root_config.root, cpv, operation))
11709                         if pkg is not None:
11710                                 return pkg
11711
11712                 tree_type = depgraph.pkg_tree_map[type_name]
11713                 db = root_config.trees[tree_type].dbapi
11714                 db_keys = list(self.trees[root_config.root][
11715                         tree_type].dbapi._aux_cache_keys)
11716                 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
11717                 pkg = Package(cpv=cpv, metadata=metadata,
11718                         root_config=root_config, installed=installed)
11719                 if type_name == "ebuild":
11720                         settings = self.pkgsettings[root_config.root]
11721                         settings.setcpv(pkg)
11722                         pkg.metadata["USE"] = settings["PORTAGE_USE"]
11723                         pkg.metadata['CHOST'] = settings.get('CHOST', '')
11724
11725                 return pkg
11726
11727 class MetadataRegen(PollScheduler):
11728
11729         def __init__(self, portdb, cp_iter=None, consumer=None,
11730                 max_jobs=None, max_load=None):
11731                 PollScheduler.__init__(self)
11732                 self._portdb = portdb
11733                 self._global_cleanse = False
11734                 if cp_iter is None:
11735                         cp_iter = self._iter_every_cp()
11736                         # We can globally cleanse stale cache only if we
11737                         # iterate over every single cp.
11738                         self._global_cleanse = True
11739                 self._cp_iter = cp_iter
11740                 self._consumer = consumer
11741
11742                 if max_jobs is None:
11743                         max_jobs = 1
11744
11745                 self._max_jobs = max_jobs
11746                 self._max_load = max_load
11747                 self._sched_iface = self._sched_iface_class(
11748                         register=self._register,
11749                         schedule=self._schedule_wait,
11750                         unregister=self._unregister)
11751
11752                 self._valid_pkgs = set()
11753                 self._cp_set = set()
11754                 self._process_iter = self._iter_metadata_processes()
11755                 self.returncode = os.EX_OK
11756                 self._error_count = 0
11757
11758         def _iter_every_cp(self):
11759                 every_cp = self._portdb.cp_all()
11760                 every_cp.sort(reverse=True)
11761                 try:
11762                         while True:
11763                                 yield every_cp.pop()
11764                 except IndexError:
11765                         pass
11766
11767         def _iter_metadata_processes(self):
11768                 portdb = self._portdb
11769                 valid_pkgs = self._valid_pkgs
11770                 cp_set = self._cp_set
11771                 consumer = self._consumer
11772
11773                 for cp in self._cp_iter:
11774                         cp_set.add(cp)
11775                         portage.writemsg_stdout("Processing %s\n" % cp)
11776                         cpv_list = portdb.cp_list(cp)
11777                         for cpv in cpv_list:
11778                                 valid_pkgs.add(cpv)
11779                                 ebuild_path, repo_path = portdb.findname2(cpv)
11780                                 metadata, st, emtime = portdb._pull_valid_cache(
11781                                         cpv, ebuild_path, repo_path)
11782                                 if metadata is not None:
11783                                         if consumer is not None:
11784                                                 consumer(cpv, ebuild_path,
11785                                                         repo_path, metadata)
11786                                         continue
11787
11788                                 yield EbuildMetadataPhase(cpv=cpv, ebuild_path=ebuild_path,
11789                                         ebuild_mtime=emtime,
11790                                         metadata_callback=portdb._metadata_callback,
11791                                         portdb=portdb, repo_path=repo_path,
11792                                         settings=portdb.doebuild_settings)
11793
11794         def run(self):
11795
11796                 portdb = self._portdb
11797                 from portage.cache.cache_errors import CacheError
11798                 dead_nodes = {}
11799
11800                 while self._schedule():
11801                         self._poll_loop()
11802
11803                 while self._jobs:
11804                         self._poll_loop()
11805
11806                 if self._global_cleanse:
11807                         for mytree in portdb.porttrees:
11808                                 try:
11809                                         dead_nodes[mytree] = set(portdb.auxdb[mytree].iterkeys())
11810                                 except CacheError, e:
11811                                         portage.writemsg("Error listing cache entries for " + \
11812                                                 "'%s': %s, continuing...\n" % (mytree, e),
11813                                                 noiselevel=-1)
11814                                         del e
11815                                         dead_nodes = None
11816                                         break
11817                 else:
11818                         cp_set = self._cp_set
11819                         cpv_getkey = portage.cpv_getkey
11820                         for mytree in portdb.porttrees:
11821                                 try:
11822                                         dead_nodes[mytree] = set(cpv for cpv in \
11823                                                 portdb.auxdb[mytree].iterkeys() \
11824                                                 if cpv_getkey(cpv) in cp_set)
11825                                 except CacheError, e:
11826                                         portage.writemsg("Error listing cache entries for " + \
11827                                                 "'%s': %s, continuing...\n" % (mytree, e),
11828                                                 noiselevel=-1)
11829                                         del e
11830                                         dead_nodes = None
11831                                         break
11832
11833                 if dead_nodes:
11834                         for y in self._valid_pkgs:
11835                                 for mytree in portdb.porttrees:
11836                                         if portdb.findname2(y, mytree=mytree)[0]:
11837                                                 dead_nodes[mytree].discard(y)
11838
11839                         for mytree, nodes in dead_nodes.iteritems():
11840                                 auxdb = portdb.auxdb[mytree]
11841                                 for y in nodes:
11842                                         try:
11843                                                 del auxdb[y]
11844                                         except (KeyError, CacheError):
11845                                                 pass
11846
11847         def _schedule_tasks(self):
11848                 """
11849                 @rtype: bool
11850                 @returns: True if there may be remaining tasks to schedule,
11851                         False otherwise.
11852                 """
11853                 while self._can_add_job():
11854                         try:
11855                                 metadata_process = self._process_iter.next()
11856                         except StopIteration:
11857                                 return False
11858
11859                         self._jobs += 1
11860                         metadata_process.scheduler = self._sched_iface
11861                         metadata_process.addExitListener(self._metadata_exit)
11862                         metadata_process.start()
11863                 return True
11864
11865         def _metadata_exit(self, metadata_process):
11866                 self._jobs -= 1
11867                 if metadata_process.returncode != os.EX_OK:
11868                         self.returncode = 1
11869                         self._error_count += 1
11870                         self._valid_pkgs.discard(metadata_process.cpv)
11871                         portage.writemsg("Error processing %s, continuing...\n" % \
11872                                 (metadata_process.cpv,), noiselevel=-1)
11873
11874                 if self._consumer is not None:
11875                         # On failure, still notify the consumer (in this case the metadata
11876                         # argument is None).
11877                         self._consumer(metadata_process.cpv,
11878                                 metadata_process.ebuild_path,
11879                                 metadata_process.repo_path,
11880                                 metadata_process.metadata)
11881
11882                 self._schedule()
11883
11884 class UninstallFailure(portage.exception.PortageException):
11885         """
11886         An instance of this class is raised by unmerge() when
11887         an uninstallation fails.
11888         """
11889         status = 1
11890         def __init__(self, *pargs):
11891                 portage.exception.PortageException.__init__(self, pargs)
11892                 if pargs:
11893                         self.status = pargs[0]
11894
11895 def unmerge(root_config, myopts, unmerge_action,
11896         unmerge_files, ldpath_mtimes, autoclean=0,
11897         clean_world=1, clean_delay=1, ordered=0, raise_on_error=0,
11898         scheduler=None, writemsg_level=portage.util.writemsg_level):
11899
11900         if clean_world:
11901                 clean_world = myopts.get('--deselect') != 'n'
11902         quiet = "--quiet" in myopts
11903         settings = root_config.settings
11904         sets = root_config.sets
11905         vartree = root_config.trees["vartree"]
11906         candidate_catpkgs=[]
11907         global_unmerge=0
11908         xterm_titles = "notitles" not in settings.features
11909         out = portage.output.EOutput()
11910         pkg_cache = {}
11911         db_keys = list(vartree.dbapi._aux_cache_keys)
11912
11913         def _pkg(cpv):
11914                 pkg = pkg_cache.get(cpv)
11915                 if pkg is None:
11916                         pkg = Package(cpv=cpv, installed=True,
11917                                 metadata=izip(db_keys, vartree.dbapi.aux_get(cpv, db_keys)),
11918                                 root_config=root_config,
11919                                 type_name="installed")
11920                         pkg_cache[cpv] = pkg
11921                 return pkg
11922
11923         vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
11924         try:
11925                 # At least the parent needs to exist for the lock file.
11926                 portage.util.ensure_dirs(vdb_path)
11927         except portage.exception.PortageException:
11928                 pass
11929         vdb_lock = None
11930         try:
11931                 if os.access(vdb_path, os.W_OK):
11932                         vdb_lock = portage.locks.lockdir(vdb_path)
11933                 realsyslist = sets["system"].getAtoms()
11934                 syslist = []
11935                 for x in realsyslist:
11936                         mycp = portage.dep_getkey(x)
11937                         if mycp in settings.getvirtuals():
11938                                 providers = []
11939                                 for provider in settings.getvirtuals()[mycp]:
11940                                         if vartree.dbapi.match(provider):
11941                                                 providers.append(provider)
11942                                 if len(providers) == 1:
11943                                         syslist.extend(providers)
11944                         else:
11945                                 syslist.append(mycp)
11946         
11947                 mysettings = portage.config(clone=settings)
11948         
11949                 if not unmerge_files:
11950                         if unmerge_action == "unmerge":
11951                                 print
11952                                 print bold("emerge unmerge") + " can only be used with specific package names"
11953                                 print
11954                                 return 0
11955                         else:
11956                                 global_unmerge = 1
11957         
11958                 localtree = vartree
11959                 # process all arguments and add all
11960                 # valid db entries to candidate_catpkgs
11961                 if global_unmerge:
11962                         if not unmerge_files:
11963                                 candidate_catpkgs.extend(vartree.dbapi.cp_all())
11964                 else:
11965                         #we've got command-line arguments
11966                         if not unmerge_files:
11967                                 print "\nNo packages to unmerge have been provided.\n"
11968                                 return 0
11969                         for x in unmerge_files:
11970                                 arg_parts = x.split('/')
11971                                 if x[0] not in [".","/"] and \
11972                                         arg_parts[-1][-7:] != ".ebuild":
11973                                         #possible cat/pkg or dep; treat as such
11974                                         candidate_catpkgs.append(x)
11975                                 elif unmerge_action in ["prune","clean"]:
11976                                         print "\n!!! Prune and clean do not accept individual" + \
11977                                                 " ebuilds as arguments;\n    skipping.\n"
11978                                         continue
11979                                 else:
11980                                         # it appears that the user is specifying an installed
11981                                         # ebuild and we're in "unmerge" mode, so it's ok.
11982                                         if not os.path.exists(x):
11983                                                 print "\n!!! The path '"+x+"' doesn't exist.\n"
11984                                                 return 0
11985         
11986                                         absx   = os.path.abspath(x)
11987                                         sp_absx = absx.split("/")
11988                                         if sp_absx[-1][-7:] == ".ebuild":
11989                                                 del sp_absx[-1]
11990                                                 absx = "/".join(sp_absx)
11991         
11992                                         sp_absx_len = len(sp_absx)
11993         
11994                                         vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
11995                                         vdb_len  = len(vdb_path)
11996         
11997                                         sp_vdb     = vdb_path.split("/")
11998                                         sp_vdb_len = len(sp_vdb)
11999         
12000                                         if not os.path.exists(absx+"/CONTENTS"):
12001                                                 print "!!! Not a valid db dir: "+str(absx)
12002                                                 return 0
12003         
12004                                         if sp_absx_len <= sp_vdb_len:
12005                                                 # The Path is shorter... so it can't be inside the vdb.
12006                                                 print sp_absx
12007                                                 print absx
12008                                                 print "\n!!!",x,"cannot be inside "+ \
12009                                                         vdb_path+"; aborting.\n"
12010                                                 return 0
12011         
12012                                         for idx in range(0,sp_vdb_len):
12013                                                 if idx >= sp_absx_len or sp_vdb[idx] != sp_absx[idx]:
12014                                                         print sp_absx
12015                                                         print absx
12016                                                         print "\n!!!", x, "is not inside "+\
12017                                                                 vdb_path+"; aborting.\n"
12018                                                         return 0
12019         
12020                                         print "="+"/".join(sp_absx[sp_vdb_len:])
12021                                         candidate_catpkgs.append(
12022                                                 "="+"/".join(sp_absx[sp_vdb_len:]))
12023         
12024                 newline=""
12025                 if (not "--quiet" in myopts):
12026                         newline="\n"
12027                 if settings["ROOT"] != "/":
12028                         writemsg_level(darkgreen(newline+ \
12029                                 ">>> Using system located in ROOT tree %s\n" % \
12030                                 settings["ROOT"]))
12031
12032                 if (("--pretend" in myopts) or ("--ask" in myopts)) and \
12033                         not ("--quiet" in myopts):
12034                         writemsg_level(darkgreen(newline+\
12035                                 ">>> These are the packages that would be unmerged:\n"))
12036
12037                 # Preservation of order is required for --depclean and --prune so
12038                 # that dependencies are respected. Use all_selected to eliminate
12039                 # duplicate packages since the same package may be selected by
12040                 # multiple atoms.
12041                 pkgmap = []
12042                 all_selected = set()
12043                 for x in candidate_catpkgs:
12044                         # cycle through all our candidate deps and determine
12045                         # what will and will not get unmerged
12046                         try:
12047                                 mymatch = vartree.dbapi.match(x)
12048                         except portage.exception.AmbiguousPackageName, errpkgs:
12049                                 print "\n\n!!! The short ebuild name \"" + \
12050                                         x + "\" is ambiguous.  Please specify"
12051                                 print "!!! one of the following fully-qualified " + \
12052                                         "ebuild names instead:\n"
12053                                 for i in errpkgs[0]:
12054                                         print "    " + green(i)
12055                                 print
12056                                 sys.exit(1)
12057         
12058                         if not mymatch and x[0] not in "<>=~":
12059                                 mymatch = localtree.dep_match(x)
12060                         if not mymatch:
12061                                 portage.writemsg("\n--- Couldn't find '%s' to %s.\n" % \
12062                                         (x, unmerge_action), noiselevel=-1)
12063                                 continue
12064
12065                         pkgmap.append(
12066                                 {"protected": set(), "selected": set(), "omitted": set()})
12067                         mykey = len(pkgmap) - 1
12068                         if unmerge_action=="unmerge":
12069                                         for y in mymatch:
12070                                                 if y not in all_selected:
12071                                                         pkgmap[mykey]["selected"].add(y)
12072                                                         all_selected.add(y)
12073                         elif unmerge_action == "prune":
12074                                 if len(mymatch) == 1:
12075                                         continue
12076                                 best_version = mymatch[0]
12077                                 best_slot = vartree.getslot(best_version)
12078                                 best_counter = vartree.dbapi.cpv_counter(best_version)
12079                                 for mypkg in mymatch[1:]:
12080                                         myslot = vartree.getslot(mypkg)
12081                                         mycounter = vartree.dbapi.cpv_counter(mypkg)
12082                                         if (myslot == best_slot and mycounter > best_counter) or \
12083                                                 mypkg == portage.best([mypkg, best_version]):
12084                                                 if myslot == best_slot:
12085                                                         if mycounter < best_counter:
12086                                                                 # On slot collision, keep the one with the
12087                                                                 # highest counter since it is the most
12088                                                                 # recently installed.
12089                                                                 continue
12090                                                 best_version = mypkg
12091                                                 best_slot = myslot
12092                                                 best_counter = mycounter
12093                                 pkgmap[mykey]["protected"].add(best_version)
12094                                 pkgmap[mykey]["selected"].update(mypkg for mypkg in mymatch \
12095                                         if mypkg != best_version and mypkg not in all_selected)
12096                                 all_selected.update(pkgmap[mykey]["selected"])
12097                         else:
12098                                 # unmerge_action == "clean"
12099                                 slotmap={}
12100                                 for mypkg in mymatch:
12101                                         if unmerge_action == "clean":
12102                                                 myslot = localtree.getslot(mypkg)
12103                                         else:
12104                                                 # since we're pruning, we don't care about slots
12105                                                 # and put all the pkgs in together
12106                                                 myslot = 0
12107                                         if myslot not in slotmap:
12108                                                 slotmap[myslot] = {}
12109                                         slotmap[myslot][localtree.dbapi.cpv_counter(mypkg)] = mypkg
12110
12111                                 for mypkg in vartree.dbapi.cp_list(
12112                                         portage.dep_getkey(mymatch[0])):
12113                                         myslot = vartree.getslot(mypkg)
12114                                         if myslot not in slotmap:
12115                                                 slotmap[myslot] = {}
12116                                         slotmap[myslot][vartree.dbapi.cpv_counter(mypkg)] = mypkg
12117
12118                                 for myslot in slotmap:
12119                                         counterkeys = slotmap[myslot].keys()
12120                                         if not counterkeys:
12121                                                 continue
12122                                         counterkeys.sort()
12123                                         pkgmap[mykey]["protected"].add(
12124                                                 slotmap[myslot][counterkeys[-1]])
12125                                         del counterkeys[-1]
12126
12127                                         for counter in counterkeys[:]:
12128                                                 mypkg = slotmap[myslot][counter]
12129                                                 if mypkg not in mymatch:
12130                                                         counterkeys.remove(counter)
12131                                                         pkgmap[mykey]["protected"].add(
12132                                                                 slotmap[myslot][counter])
12133
12134                                         #be pretty and get them in order of merge:
12135                                         for ckey in counterkeys:
12136                                                 mypkg = slotmap[myslot][ckey]
12137                                                 if mypkg not in all_selected:
12138                                                         pkgmap[mykey]["selected"].add(mypkg)
12139                                                         all_selected.add(mypkg)
12140                                         # ok, now the last-merged package
12141                                         # is protected, and the rest are selected
12142                 numselected = len(all_selected)
12143                 if global_unmerge and not numselected:
12144                         portage.writemsg_stdout("\n>>> No outdated packages were found on your system.\n")
12145                         return 0
12146         
12147                 if not numselected:
12148                         portage.writemsg_stdout(
12149                                 "\n>>> No packages selected for removal by " + \
12150                                 unmerge_action + "\n")
12151                         return 0
12152         finally:
12153                 if vdb_lock:
12154                         vartree.dbapi.flush_cache()
12155                         portage.locks.unlockdir(vdb_lock)
12156         
12157         from portage.sets.base import EditablePackageSet
12158         
12159         # generate a list of package sets that are directly or indirectly listed in "world",
12160         # as there is no persistent list of "installed" sets
12161         installed_sets = ["world"]
12162         stop = False
12163         pos = 0
12164         while not stop:
12165                 stop = True
12166                 pos = len(installed_sets)
12167                 for s in installed_sets[pos - 1:]:
12168                         if s not in sets:
12169                                 continue
12170                         candidates = [x[len(SETPREFIX):] for x in sets[s].getNonAtoms() if x.startswith(SETPREFIX)]
12171                         if candidates:
12172                                 stop = False
12173                                 installed_sets += candidates
12174         installed_sets = [x for x in installed_sets if x not in root_config.setconfig.active]
12175         del stop, pos
12176
12177         # we don't want to unmerge packages that are still listed in user-editable package sets
12178         # listed in "world" as they would be remerged on the next update of "world" or the 
12179         # relevant package sets.
12180         unknown_sets = set()
12181         for cp in xrange(len(pkgmap)):
12182                 for cpv in pkgmap[cp]["selected"].copy():
12183                         try:
12184                                 pkg = _pkg(cpv)
12185                         except KeyError:
12186                                 # It could have been uninstalled
12187                                 # by a concurrent process.
12188                                 continue
12189
12190                         if unmerge_action != "clean" and \
12191                                 root_config.root == "/" and \
12192                                 portage.match_from_list(
12193                                 portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
12194                                 msg = ("Not unmerging package %s since there is no valid " + \
12195                                 "reason for portage to unmerge itself.") % (pkg.cpv,)
12196                                 for line in textwrap.wrap(msg, 75):
12197                                         out.eerror(line)
12198                                 # adjust pkgmap so the display output is correct
12199                                 pkgmap[cp]["selected"].remove(cpv)
12200                                 all_selected.remove(cpv)
12201                                 pkgmap[cp]["protected"].add(cpv)
12202                                 continue
12203
12204                         parents = []
12205                         for s in installed_sets:
12206                                 # skip sets that the user requested to unmerge, and skip world 
12207                                 # unless we're unmerging a package set (as the package would be 
12208                                 # removed from "world" later on)
12209                                 if s in root_config.setconfig.active or (s == "world" and not root_config.setconfig.active):
12210                                         continue
12211
12212                                 if s not in sets:
12213                                         if s in unknown_sets:
12214                                                 continue
12215                                         unknown_sets.add(s)
12216                                         out = portage.output.EOutput()
12217                                         out.eerror(("Unknown set '@%s' in " + \
12218                                                 "%svar/lib/portage/world_sets") % \
12219                                                 (s, root_config.root))
12220                                         continue
12221
12222                                 # only check instances of EditablePackageSet as other classes are generally used for
12223                                 # special purposes and can be ignored here (and are usually generated dynamically, so the
12224                                 # user can't do much about them anyway)
12225                                 if isinstance(sets[s], EditablePackageSet):
12226
12227                                         # This is derived from a snippet of code in the
12228                                         # depgraph._iter_atoms_for_pkg() method.
12229                                         for atom in sets[s].iterAtomsForPackage(pkg):
12230                                                 inst_matches = vartree.dbapi.match(atom)
12231                                                 inst_matches.reverse() # descending order
12232                                                 higher_slot = None
12233                                                 for inst_cpv in inst_matches:
12234                                                         try:
12235                                                                 inst_pkg = _pkg(inst_cpv)
12236                                                         except KeyError:
12237                                                                 # It could have been uninstalled
12238                                                                 # by a concurrent process.
12239                                                                 continue
12240
12241                                                         if inst_pkg.cp != atom.cp:
12242                                                                 continue
12243                                                         if pkg >= inst_pkg:
12244                                                                 # This is descending order, and we're not
12245                                                                 # interested in any versions <= pkg given.
12246                                                                 break
12247                                                         if pkg.slot_atom != inst_pkg.slot_atom:
12248                                                                 higher_slot = inst_pkg
12249                                                                 break
12250                                                 if higher_slot is None:
12251                                                         parents.append(s)
12252                                                         break
12253                         if parents:
12254                                 #print colorize("WARN", "Package %s is going to be unmerged," % cpv)
12255                                 #print colorize("WARN", "but still listed in the following package sets:")
12256                                 #print "    %s\n" % ", ".join(parents)
12257                                 print colorize("WARN", "Not unmerging package %s as it is" % cpv)
12258                                 print colorize("WARN", "still referenced by the following package sets:")
12259                                 print "    %s\n" % ", ".join(parents)
12260                                 # adjust pkgmap so the display output is correct
12261                                 pkgmap[cp]["selected"].remove(cpv)
12262                                 all_selected.remove(cpv)
12263                                 pkgmap[cp]["protected"].add(cpv)
12264         
12265         del installed_sets
12266
12267         numselected = len(all_selected)
12268         if not numselected:
12269                 writemsg_level(
12270                         "\n>>> No packages selected for removal by " + \
12271                         unmerge_action + "\n")
12272                 return 0
12273
12274         # Unmerge order only matters in some cases
12275         if not ordered:
12276                 unordered = {}
12277                 for d in pkgmap:
12278                         selected = d["selected"]
12279                         if not selected:
12280                                 continue
12281                         cp = portage.cpv_getkey(iter(selected).next())
12282                         cp_dict = unordered.get(cp)
12283                         if cp_dict is None:
12284                                 cp_dict = {}
12285                                 unordered[cp] = cp_dict
12286                                 for k in d:
12287                                         cp_dict[k] = set()
12288                         for k, v in d.iteritems():
12289                                 cp_dict[k].update(v)
12290                 pkgmap = [unordered[cp] for cp in sorted(unordered)]
12291
12292         for x in xrange(len(pkgmap)):
12293                 selected = pkgmap[x]["selected"]
12294                 if not selected:
12295                         continue
12296                 for mytype, mylist in pkgmap[x].iteritems():
12297                         if mytype == "selected":
12298                                 continue
12299                         mylist.difference_update(all_selected)
12300                 cp = portage.cpv_getkey(iter(selected).next())
12301                 for y in localtree.dep_match(cp):
12302                         if y not in pkgmap[x]["omitted"] and \
12303                                 y not in pkgmap[x]["selected"] and \
12304                                 y not in pkgmap[x]["protected"] and \
12305                                 y not in all_selected:
12306                                 pkgmap[x]["omitted"].add(y)
12307                 if global_unmerge and not pkgmap[x]["selected"]:
12308                         #avoid cluttering the preview printout with stuff that isn't getting unmerged
12309                         continue
12310                 if not (pkgmap[x]["protected"] or pkgmap[x]["omitted"]) and cp in syslist:
12311                         writemsg_level(colorize("BAD","\a\n\n!!! " + \
12312                                 "'%s' is part of your system profile.\n" % cp),
12313                                 level=logging.WARNING, noiselevel=-1)
12314                         writemsg_level(colorize("WARN","\a!!! Unmerging it may " + \
12315                                 "be damaging to your system.\n\n"),
12316                                 level=logging.WARNING, noiselevel=-1)
12317                         if clean_delay and "--pretend" not in myopts and "--ask" not in myopts:
12318                                 countdown(int(settings["EMERGE_WARNING_DELAY"]),
12319                                         colorize("UNMERGE_WARN", "Press Ctrl-C to Stop"))
12320                 if not quiet:
12321                         writemsg_level("\n %s\n" % (bold(cp),), noiselevel=-1)
12322                 else:
12323                         writemsg_level(bold(cp) + ": ", noiselevel=-1)
12324                 for mytype in ["selected","protected","omitted"]:
12325                         if not quiet:
12326                                 writemsg_level((mytype + ": ").rjust(14), noiselevel=-1)
12327                         if pkgmap[x][mytype]:
12328                                 sorted_pkgs = [portage.catpkgsplit(mypkg)[1:] for mypkg in pkgmap[x][mytype]]
12329                                 sorted_pkgs.sort(key=cmp_sort_key(portage.pkgcmp))
12330                                 for pn, ver, rev in sorted_pkgs:
12331                                         if rev == "r0":
12332                                                 myversion = ver
12333                                         else:
12334                                                 myversion = ver + "-" + rev
12335                                         if mytype == "selected":
12336                                                 writemsg_level(
12337                                                         colorize("UNMERGE_WARN", myversion + " "),
12338                                                         noiselevel=-1)
12339                                         else:
12340                                                 writemsg_level(
12341                                                         colorize("GOOD", myversion + " "), noiselevel=-1)
12342                         else:
12343                                 writemsg_level("none ", noiselevel=-1)
12344                         if not quiet:
12345                                 writemsg_level("\n", noiselevel=-1)
12346                 if quiet:
12347                         writemsg_level("\n", noiselevel=-1)
12348
12349         writemsg_level("\n>>> " + colorize("UNMERGE_WARN", "'Selected'") + \
12350                 " packages are slated for removal.\n")
12351         writemsg_level(">>> " + colorize("GOOD", "'Protected'") + \
12352                         " and " + colorize("GOOD", "'omitted'") + \
12353                         " packages will not be removed.\n\n")
12354
12355         if "--pretend" in myopts:
12356                 #we're done... return
12357                 return 0
12358         if "--ask" in myopts:
12359                 if userquery("Would you like to unmerge these packages?")=="No":
12360                         # enter pretend mode for correct formatting of results
12361                         myopts["--pretend"] = True
12362                         print
12363                         print "Quitting."
12364                         print
12365                         return 0
12366         #the real unmerging begins, after a short delay....
12367         if clean_delay and not autoclean:
12368                 countdown(int(settings["CLEAN_DELAY"]), ">>> Unmerging")
12369
12370         for x in xrange(len(pkgmap)):
12371                 for y in pkgmap[x]["selected"]:
12372                         writemsg_level(">>> Unmerging "+y+"...\n", noiselevel=-1)
12373                         emergelog(xterm_titles, "=== Unmerging... ("+y+")")
12374                         mysplit = y.split("/")
12375                         #unmerge...
12376                         retval = portage.unmerge(mysplit[0], mysplit[1], settings["ROOT"],
12377                                 mysettings, unmerge_action not in ["clean","prune"],
12378                                 vartree=vartree, ldpath_mtimes=ldpath_mtimes,
12379                                 scheduler=scheduler)
12380
12381                         if retval != os.EX_OK:
12382                                 emergelog(xterm_titles, " !!! unmerge FAILURE: "+y)
12383                                 if raise_on_error:
12384                                         raise UninstallFailure(retval)
12385                                 sys.exit(retval)
12386                         else:
12387                                 if clean_world and hasattr(sets["world"], "cleanPackage"):
12388                                         sets["world"].cleanPackage(vartree.dbapi, y)
12389                                 emergelog(xterm_titles, " >>> unmerge success: "+y)
12390         if clean_world and hasattr(sets["world"], "remove"):
12391                 for s in root_config.setconfig.active:
12392                         sets["world"].remove(SETPREFIX+s)
12393         return 1
12394
12395 def chk_updated_info_files(root, infodirs, prev_mtimes, retval):
12396
12397         if os.path.exists("/usr/bin/install-info"):
12398                 out = portage.output.EOutput()
12399                 regen_infodirs=[]
12400                 for z in infodirs:
12401                         if z=='':
12402                                 continue
12403                         inforoot=normpath(root+z)
12404                         if os.path.isdir(inforoot):
12405                                 infomtime = long(os.stat(inforoot).st_mtime)
12406                                 if inforoot not in prev_mtimes or \
12407                                         prev_mtimes[inforoot] != infomtime:
12408                                                 regen_infodirs.append(inforoot)
12409
12410                 if not regen_infodirs:
12411                         portage.writemsg_stdout("\n")
12412                         out.einfo("GNU info directory index is up-to-date.")
12413                 else:
12414                         portage.writemsg_stdout("\n")
12415                         out.einfo("Regenerating GNU info directory index...")
12416
12417                         dir_extensions = ("", ".gz", ".bz2")
12418                         icount=0
12419                         badcount=0
12420                         errmsg = ""
12421                         for inforoot in regen_infodirs:
12422                                 if inforoot=='':
12423                                         continue
12424
12425                                 if not os.path.isdir(inforoot) or \
12426                                         not os.access(inforoot, os.W_OK):
12427                                         continue
12428
12429                                 file_list = os.listdir(inforoot)
12430                                 file_list.sort()
12431                                 dir_file = os.path.join(inforoot, "dir")
12432                                 moved_old_dir = False
12433                                 processed_count = 0
12434                                 for x in file_list:
12435                                         if x.startswith(".") or \
12436                                                 os.path.isdir(os.path.join(inforoot, x)):
12437                                                 continue
12438                                         if x.startswith("dir"):
12439                                                 skip = False
12440                                                 for ext in dir_extensions:
12441                                                         if x == "dir" + ext or \
12442                                                                 x == "dir" + ext + ".old":
12443                                                                 skip = True
12444                                                                 break
12445                                                 if skip:
12446                                                         continue
12447                                         if processed_count == 0:
12448                                                 for ext in dir_extensions:
12449                                                         try:
12450                                                                 os.rename(dir_file + ext, dir_file + ext + ".old")
12451                                                                 moved_old_dir = True
12452                                                         except EnvironmentError, e:
12453                                                                 if e.errno != errno.ENOENT:
12454                                                                         raise
12455                                                                 del e
12456                                         processed_count += 1
12457                                         myso=commands.getstatusoutput("LANG=C LANGUAGE=C /usr/bin/install-info --dir-file="+inforoot+"/dir "+inforoot+"/"+x)[1]
12458                                         existsstr="already exists, for file `"
12459                                         if myso!="":
12460                                                 if re.search(existsstr,myso):
12461                                                         # Already exists... Don't increment the count for this.
12462                                                         pass
12463                                                 elif myso[:44]=="install-info: warning: no info dir entry in ":
12464                                                         # This info file doesn't contain a DIR-header: install-info produces this
12465                                                         # (harmless) warning (the --quiet switch doesn't seem to work).
12466                                                         # Don't increment the count for this.
12467                                                         pass
12468                                                 else:
12469                                                         badcount=badcount+1
12470                                                         errmsg += myso + "\n"
12471                                         icount=icount+1
12472
12473                                 if moved_old_dir and not os.path.exists(dir_file):
12474                                         # We didn't generate a new dir file, so put the old file
12475                                         # back where it was originally found.
12476                                         for ext in dir_extensions:
12477                                                 try:
12478                                                         os.rename(dir_file + ext + ".old", dir_file + ext)
12479                                                 except EnvironmentError, e:
12480                                                         if e.errno != errno.ENOENT:
12481                                                                 raise
12482                                                         del e
12483
12484                                 # Clean dir.old cruft so that they don't prevent
12485                                 # unmerge of otherwise empty directories.
12486                                 for ext in dir_extensions:
12487                                         try:
12488                                                 os.unlink(dir_file + ext + ".old")
12489                                         except EnvironmentError, e:
12490                                                 if e.errno != errno.ENOENT:
12491                                                         raise
12492                                                 del e
12493
12494                                 #update mtime so we can potentially avoid regenerating.
12495                                 prev_mtimes[inforoot] = long(os.stat(inforoot).st_mtime)
12496
12497                         if badcount:
12498                                 out.eerror("Processed %d info files; %d errors." % \
12499                                         (icount, badcount))
12500                                 writemsg_level(errmsg, level=logging.ERROR, noiselevel=-1)
12501                         else:
12502                                 if icount > 0:
12503                                         out.einfo("Processed %d info files." % (icount,))
12504
12505
12506 def display_news_notification(root_config, myopts):
12507         target_root = root_config.root
12508         trees = root_config.trees
12509         settings = trees["vartree"].settings
12510         portdb = trees["porttree"].dbapi
12511         vardb = trees["vartree"].dbapi
12512         NEWS_PATH = os.path.join("metadata", "news")
12513         UNREAD_PATH = os.path.join(target_root, NEWS_LIB_PATH, "news")
12514         newsReaderDisplay = False
12515         update = "--pretend" not in myopts
12516
12517         for repo in portdb.getRepositories():
12518                 unreadItems = checkUpdatedNewsItems(
12519                         portdb, vardb, NEWS_PATH, UNREAD_PATH, repo, update=update)
12520                 if unreadItems:
12521                         if not newsReaderDisplay:
12522                                 newsReaderDisplay = True
12523                                 print
12524                         print colorize("WARN", " * IMPORTANT:"),
12525                         print "%s news items need reading for repository '%s'." % (unreadItems, repo)
12526                         
12527         
12528         if newsReaderDisplay:
12529                 print colorize("WARN", " *"),
12530                 print "Use " + colorize("GOOD", "eselect news") + " to read news items."
12531                 print
12532
12533 def display_preserved_libs(vardbapi):
12534         MAX_DISPLAY = 3
12535
12536         # Ensure the registry is consistent with existing files.
12537         vardbapi.plib_registry.pruneNonExisting()
12538
12539         if vardbapi.plib_registry.hasEntries():
12540                 print
12541                 print colorize("WARN", "!!!") + " existing preserved libs:"
12542                 plibdata = vardbapi.plib_registry.getPreservedLibs()
12543                 linkmap = vardbapi.linkmap
12544                 consumer_map = {}
12545                 owners = {}
12546                 linkmap_broken = False
12547
12548                 try:
12549                         linkmap.rebuild()
12550                 except portage.exception.CommandNotFound, e:
12551                         writemsg_level("!!! Command Not Found: %s\n" % (e,),
12552                                 level=logging.ERROR, noiselevel=-1)
12553                         del e
12554                         linkmap_broken = True
12555                 else:
12556                         search_for_owners = set()
12557                         for cpv in plibdata:
12558                                 internal_plib_keys = set(linkmap._obj_key(f) \
12559                                         for f in plibdata[cpv])
12560                                 for f in plibdata[cpv]:
12561                                         if f in consumer_map:
12562                                                 continue
12563                                         consumers = []
12564                                         for c in linkmap.findConsumers(f):
12565                                                 # Filter out any consumers that are also preserved libs
12566                                                 # belonging to the same package as the provider.
12567                                                 if linkmap._obj_key(c) not in internal_plib_keys:
12568                                                         consumers.append(c)
12569                                         consumers.sort()
12570                                         consumer_map[f] = consumers
12571                                         search_for_owners.update(consumers[:MAX_DISPLAY+1])
12572
12573                         owners = vardbapi._owners.getFileOwnerMap(search_for_owners)
12574
12575                 for cpv in plibdata:
12576                         print colorize("WARN", ">>>") + " package: %s" % cpv
12577                         samefile_map = {}
12578                         for f in plibdata[cpv]:
12579                                 obj_key = linkmap._obj_key(f)
12580                                 alt_paths = samefile_map.get(obj_key)
12581                                 if alt_paths is None:
12582                                         alt_paths = set()
12583                                         samefile_map[obj_key] = alt_paths
12584                                 alt_paths.add(f)
12585
12586                         for alt_paths in samefile_map.itervalues():
12587                                 alt_paths = sorted(alt_paths)
12588                                 for p in alt_paths:
12589                                         print colorize("WARN", " * ") + " - %s" % (p,)
12590                                 f = alt_paths[0]
12591                                 consumers = consumer_map.get(f, [])
12592                                 for c in consumers[:MAX_DISPLAY]:
12593                                         print colorize("WARN", " * ") + "     used by %s (%s)" % \
12594                                                 (c, ", ".join(x.mycpv for x in owners.get(c, [])))
12595                                 if len(consumers) == MAX_DISPLAY + 1:
12596                                         print colorize("WARN", " * ") + "     used by %s (%s)" % \
12597                                                 (consumers[MAX_DISPLAY], ", ".join(x.mycpv \
12598                                                 for x in owners.get(consumers[MAX_DISPLAY], [])))
12599                                 elif len(consumers) > MAX_DISPLAY:
12600                                         print colorize("WARN", " * ") + "     used by %d other files" % (len(consumers) - MAX_DISPLAY)
12601                 print "Use " + colorize("GOOD", "emerge @preserved-rebuild") + " to rebuild packages using these libraries"
12602
12603
12604 def _flush_elog_mod_echo():
12605         """
12606         Dump the mod_echo output now so that our other
12607         notifications are shown last.
12608         @rtype: bool
12609         @returns: True if messages were shown, False otherwise.
12610         """
12611         messages_shown = False
12612         try:
12613                 from portage.elog import mod_echo
12614         except ImportError:
12615                 pass # happens during downgrade to a version without the module
12616         else:
12617                 messages_shown = bool(mod_echo._items)
12618                 mod_echo.finalize()
12619         return messages_shown
12620
12621 def post_emerge(root_config, myopts, mtimedb, retval):
12622         """
12623         Misc. things to run at the end of a merge session.
12624         
12625         Update Info Files
12626         Update Config Files
12627         Update News Items
12628         Commit mtimeDB
12629         Display preserved libs warnings
12630         Exit Emerge
12631
12632         @param trees: A dictionary mapping each ROOT to it's package databases
12633         @type trees: dict
12634         @param mtimedb: The mtimeDB to store data needed across merge invocations
12635         @type mtimedb: MtimeDB class instance
12636         @param retval: Emerge's return value
12637         @type retval: Int
12638         @rype: None
12639         @returns:
12640         1.  Calls sys.exit(retval)
12641         """
12642
12643         target_root = root_config.root
12644         trees = { target_root : root_config.trees }
12645         vardbapi = trees[target_root]["vartree"].dbapi
12646         settings = vardbapi.settings
12647         info_mtimes = mtimedb["info"]
12648
12649         # Load the most current variables from ${ROOT}/etc/profile.env
12650         settings.unlock()
12651         settings.reload()
12652         settings.regenerate()
12653         settings.lock()
12654
12655         config_protect = settings.get("CONFIG_PROTECT","").split()
12656         infodirs = settings.get("INFOPATH","").split(":") + \
12657                 settings.get("INFODIR","").split(":")
12658
12659         os.chdir("/")
12660
12661         if retval == os.EX_OK:
12662                 exit_msg = " *** exiting successfully."
12663         else:
12664                 exit_msg = " *** exiting unsuccessfully with status '%s'." % retval
12665         emergelog("notitles" not in settings.features, exit_msg)
12666
12667         _flush_elog_mod_echo()
12668
12669         counter_hash = settings.get("PORTAGE_COUNTER_HASH")
12670         if "--pretend" in myopts or (counter_hash is not None and \
12671                 counter_hash == vardbapi._counter_hash()):
12672                 display_news_notification(root_config, myopts)
12673                 # If vdb state has not changed then there's nothing else to do.
12674                 sys.exit(retval)
12675
12676         vdb_path = os.path.join(target_root, portage.VDB_PATH)
12677         portage.util.ensure_dirs(vdb_path)
12678         vdb_lock = None
12679         if os.access(vdb_path, os.W_OK) and not "--pretend" in myopts:
12680                 vdb_lock = portage.locks.lockdir(vdb_path)
12681
12682         if vdb_lock:
12683                 try:
12684                         if "noinfo" not in settings.features:
12685                                 chk_updated_info_files(target_root,
12686                                         infodirs, info_mtimes, retval)
12687                         mtimedb.commit()
12688                 finally:
12689                         if vdb_lock:
12690                                 portage.locks.unlockdir(vdb_lock)
12691
12692         chk_updated_cfg_files(target_root, config_protect)
12693         
12694         display_news_notification(root_config, myopts)
12695         if retval in (None, os.EX_OK) or (not "--pretend" in myopts):
12696                 display_preserved_libs(vardbapi)        
12697
12698         sys.exit(retval)
12699
12700
12701 def chk_updated_cfg_files(target_root, config_protect):
12702         if config_protect:
12703                 #number of directories with some protect files in them
12704                 procount=0
12705                 for x in config_protect:
12706                         x = os.path.join(target_root, x.lstrip(os.path.sep))
12707                         if not os.access(x, os.W_OK):
12708                                 # Avoid Permission denied errors generated
12709                                 # later by `find`.
12710                                 continue
12711                         try:
12712                                 mymode = os.lstat(x).st_mode
12713                         except OSError:
12714                                 continue
12715                         if stat.S_ISLNK(mymode):
12716                                 # We want to treat it like a directory if it
12717                                 # is a symlink to an existing directory.
12718                                 try:
12719                                         real_mode = os.stat(x).st_mode
12720                                         if stat.S_ISDIR(real_mode):
12721                                                 mymode = real_mode
12722                                 except OSError:
12723                                         pass
12724                         if stat.S_ISDIR(mymode):
12725                                 mycommand = "find '%s' -name '.*' -type d -prune -o -name '._cfg????_*'" % x
12726                         else:
12727                                 mycommand = "find '%s' -maxdepth 1 -name '._cfg????_%s'" % \
12728                                         os.path.split(x.rstrip(os.path.sep))
12729                         mycommand += " ! -name '.*~' ! -iname '.*.bak' -print0"
12730                         a = commands.getstatusoutput(mycommand)
12731                         if a[0] != 0:
12732                                 sys.stderr.write(" %s error scanning '%s': " % (bad("*"), x))
12733                                 sys.stderr.flush()
12734                                 # Show the error message alone, sending stdout to /dev/null.
12735                                 os.system(mycommand + " 1>/dev/null")
12736                         else:
12737                                 files = a[1].split('\0')
12738                                 # split always produces an empty string as the last element
12739                                 if files and not files[-1]:
12740                                         del files[-1]
12741                                 if files:
12742                                         procount += 1
12743                                         print "\n"+colorize("WARN", " * IMPORTANT:"),
12744                                         if stat.S_ISDIR(mymode):
12745                                                  print "%d config files in '%s' need updating." % \
12746                                                         (len(files), x)
12747                                         else:
12748                                                  print "config file '%s' needs updating." % x
12749
12750                 if procount:
12751                         print " "+yellow("*")+" See the "+colorize("INFORM","CONFIGURATION FILES")+ \
12752                                 " section of the " + bold("emerge")
12753                         print " "+yellow("*")+" man page to learn how to update config files."
12754
12755 def checkUpdatedNewsItems(portdb, vardb, NEWS_PATH, UNREAD_PATH, repo_id,
12756         update=False):
12757         """
12758         Examines news items in repodir + '/' + NEWS_PATH and attempts to find unread items
12759         Returns the number of unread (yet relevent) items.
12760         
12761         @param portdb: a portage tree database
12762         @type portdb: pordbapi
12763         @param vardb: an installed package database
12764         @type vardb: vardbapi
12765         @param NEWS_PATH:
12766         @type NEWS_PATH:
12767         @param UNREAD_PATH:
12768         @type UNREAD_PATH:
12769         @param repo_id:
12770         @type repo_id:
12771         @rtype: Integer
12772         @returns:
12773         1.  The number of unread but relevant news items.
12774         
12775         """
12776         from portage.news import NewsManager
12777         manager = NewsManager(portdb, vardb, NEWS_PATH, UNREAD_PATH)
12778         return manager.getUnreadItems( repo_id, update=update )
12779
12780 def insert_category_into_atom(atom, category):
12781         alphanum = re.search(r'\w', atom)
12782         if alphanum:
12783                 ret = atom[:alphanum.start()] + "%s/" % category + \
12784                         atom[alphanum.start():]
12785         else:
12786                 ret = None
12787         return ret
12788
12789 def is_valid_package_atom(x):
12790         if "/" not in x:
12791                 alphanum = re.search(r'\w', x)
12792                 if alphanum:
12793                         x = x[:alphanum.start()] + "cat/" + x[alphanum.start():]
12794         return portage.isvalidatom(x)
12795
12796 def show_blocker_docs_link():
12797         print
12798         print "For more information about " + bad("Blocked Packages") + ", please refer to the following"
12799         print "section of the Gentoo Linux x86 Handbook (architecture is irrelevant):"
12800         print
12801         print "http://www.gentoo.org/doc/en/handbook/handbook-x86.xml?full=1#blocked"
12802         print
12803
12804 def show_mask_docs():
12805         print "For more information, see the MASKED PACKAGES section in the emerge"
12806         print "man page or refer to the Gentoo Handbook."
12807
12808 def action_sync(settings, trees, mtimedb, myopts, myaction):
12809         xterm_titles = "notitles" not in settings.features
12810         emergelog(xterm_titles, " === sync")
12811         portdb = trees[settings["ROOT"]]["porttree"].dbapi
12812         myportdir = portdb.porttree_root
12813         out = portage.output.EOutput()
12814         if not myportdir:
12815                 sys.stderr.write("!!! PORTDIR is undefined.  Is /etc/make.globals missing?\n")
12816                 sys.exit(1)
12817         if myportdir[-1]=="/":
12818                 myportdir=myportdir[:-1]
12819         try:
12820                 st = os.stat(myportdir)
12821         except OSError:
12822                 st = None
12823         if st is None:
12824                 print ">>>",myportdir,"not found, creating it."
12825                 os.makedirs(myportdir,0755)
12826                 st = os.stat(myportdir)
12827
12828         spawn_kwargs = {}
12829         spawn_kwargs["env"] = settings.environ()
12830         if 'usersync' in settings.features and \
12831                 portage.data.secpass >= 2 and \
12832                 (st.st_uid != os.getuid() and st.st_mode & 0700 or \
12833                 st.st_gid != os.getgid() and st.st_mode & 0070):
12834                 try:
12835                         homedir = pwd.getpwuid(st.st_uid).pw_dir
12836                 except KeyError:
12837                         pass
12838                 else:
12839                         # Drop privileges when syncing, in order to match
12840                         # existing uid/gid settings.
12841                         spawn_kwargs["uid"]    = st.st_uid
12842                         spawn_kwargs["gid"]    = st.st_gid
12843                         spawn_kwargs["groups"] = [st.st_gid]
12844                         spawn_kwargs["env"]["HOME"] = homedir
12845                         umask = 0002
12846                         if not st.st_mode & 0020:
12847                                 umask = umask | 0020
12848                         spawn_kwargs["umask"] = umask
12849
12850         syncuri = settings.get("SYNC", "").strip()
12851         if not syncuri:
12852                 writemsg_level("!!! SYNC is undefined. Is /etc/make.globals missing?\n",
12853                         noiselevel=-1, level=logging.ERROR)
12854                 return 1
12855
12856         vcs_dirs = frozenset([".git", ".svn", "CVS", ".hg"])
12857         vcs_dirs = vcs_dirs.intersection(os.listdir(myportdir))
12858
12859         os.umask(0022)
12860         dosyncuri = syncuri
12861         updatecache_flg = False
12862         if myaction == "metadata":
12863                 print "skipping sync"
12864                 updatecache_flg = True
12865         elif ".git" in vcs_dirs:
12866                 # Update existing git repository, and ignore the syncuri. We are
12867                 # going to trust the user and assume that the user is in the branch
12868                 # that he/she wants updated. We'll let the user manage branches with
12869                 # git directly.
12870                 if portage.process.find_binary("git") is None:
12871                         msg = ["Command not found: git",
12872                         "Type \"emerge dev-util/git\" to enable git support."]
12873                         for l in msg:
12874                                 writemsg_level("!!! %s\n" % l,
12875                                         level=logging.ERROR, noiselevel=-1)
12876                         return 1
12877                 msg = ">>> Starting git pull in %s..." % myportdir
12878                 emergelog(xterm_titles, msg )
12879                 writemsg_level(msg + "\n")
12880                 exitcode = portage.process.spawn_bash("cd %s ; git pull" % \
12881                         (portage._shell_quote(myportdir),), **spawn_kwargs)
12882                 if exitcode != os.EX_OK:
12883                         msg = "!!! git pull error in %s." % myportdir
12884                         emergelog(xterm_titles, msg)
12885                         writemsg_level(msg + "\n", level=logging.ERROR, noiselevel=-1)
12886                         return exitcode
12887                 msg = ">>> Git pull in %s successful" % myportdir
12888                 emergelog(xterm_titles, msg)
12889                 writemsg_level(msg + "\n")
12890                 exitcode = git_sync_timestamps(settings, myportdir)
12891                 if exitcode == os.EX_OK:
12892                         updatecache_flg = True
12893         elif syncuri[:8]=="rsync://":
12894                 for vcs_dir in vcs_dirs:
12895                         writemsg_level(("!!! %s appears to be under revision " + \
12896                                 "control (contains %s).\n!!! Aborting rsync sync.\n") % \
12897                                 (myportdir, vcs_dir), level=logging.ERROR, noiselevel=-1)
12898                         return 1
12899                 if not os.path.exists("/usr/bin/rsync"):
12900                         print "!!! /usr/bin/rsync does not exist, so rsync support is disabled."
12901                         print "!!! Type \"emerge net-misc/rsync\" to enable rsync support."
12902                         sys.exit(1)
12903                 mytimeout=180
12904
12905                 rsync_opts = []
12906                 if settings["PORTAGE_RSYNC_OPTS"] == "":
12907                         portage.writemsg("PORTAGE_RSYNC_OPTS empty or unset, using hardcoded defaults\n")
12908                         rsync_opts.extend([
12909                                 "--recursive",    # Recurse directories
12910                                 "--links",        # Consider symlinks
12911                                 "--safe-links",   # Ignore links outside of tree
12912                                 "--perms",        # Preserve permissions
12913                                 "--times",        # Preserive mod times
12914                                 "--compress",     # Compress the data transmitted
12915                                 "--force",        # Force deletion on non-empty dirs
12916                                 "--whole-file",   # Don't do block transfers, only entire files
12917                                 "--delete",       # Delete files that aren't in the master tree
12918                                 "--stats",        # Show final statistics about what was transfered
12919                                 "--timeout="+str(mytimeout), # IO timeout if not done in X seconds
12920                                 "--exclude=/distfiles",   # Exclude distfiles from consideration
12921                                 "--exclude=/local",       # Exclude local     from consideration
12922                                 "--exclude=/packages",    # Exclude packages  from consideration
12923                         ])
12924
12925                 else:
12926                         # The below validation is not needed when using the above hardcoded
12927                         # defaults.
12928
12929                         portage.writemsg("Using PORTAGE_RSYNC_OPTS instead of hardcoded defaults\n", 1)
12930                         rsync_opts.extend(
12931                                 shlex.split(settings.get("PORTAGE_RSYNC_OPTS","")))
12932                         for opt in ("--recursive", "--times"):
12933                                 if opt not in rsync_opts:
12934                                         portage.writemsg(yellow("WARNING:") + " adding required option " + \
12935                                         "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
12936                                         rsync_opts.append(opt)
12937         
12938                         for exclude in ("distfiles", "local", "packages"):
12939                                 opt = "--exclude=/%s" % exclude
12940                                 if opt not in rsync_opts:
12941                                         portage.writemsg(yellow("WARNING:") + \
12942                                         " adding required option %s not included in "  % opt + \
12943                                         "PORTAGE_RSYNC_OPTS (can be overridden with --exclude='!')\n")
12944                                         rsync_opts.append(opt)
12945         
12946                         if syncuri.rstrip("/").endswith(".gentoo.org/gentoo-portage"):
12947                                 def rsync_opt_startswith(opt_prefix):
12948                                         for x in rsync_opts:
12949                                                 if x.startswith(opt_prefix):
12950                                                         return True
12951                                         return False
12952
12953                                 if not rsync_opt_startswith("--timeout="):
12954                                         rsync_opts.append("--timeout=%d" % mytimeout)
12955
12956                                 for opt in ("--compress", "--whole-file"):
12957                                         if opt not in rsync_opts:
12958                                                 portage.writemsg(yellow("WARNING:") + " adding required option " + \
12959                                                 "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
12960                                                 rsync_opts.append(opt)
12961
12962                 if "--quiet" in myopts:
12963                         rsync_opts.append("--quiet")    # Shut up a lot
12964                 else:
12965                         rsync_opts.append("--verbose")  # Print filelist
12966
12967                 if "--verbose" in myopts:
12968                         rsync_opts.append("--progress")  # Progress meter for each file
12969
12970                 if "--debug" in myopts:
12971                         rsync_opts.append("--checksum") # Force checksum on all files
12972
12973                 # Real local timestamp file.
12974                 servertimestampfile = os.path.join(
12975                         myportdir, "metadata", "timestamp.chk")
12976
12977                 content = portage.util.grabfile(servertimestampfile)
12978                 mytimestamp = 0
12979                 if content:
12980                         try:
12981                                 mytimestamp = time.mktime(time.strptime(content[0],
12982                                         "%a, %d %b %Y %H:%M:%S +0000"))
12983                         except (OverflowError, ValueError):
12984                                 pass
12985                 del content
12986
12987                 try:
12988                         rsync_initial_timeout = \
12989                                 int(settings.get("PORTAGE_RSYNC_INITIAL_TIMEOUT", "15"))
12990                 except ValueError:
12991                         rsync_initial_timeout = 15
12992
12993                 try:
12994                         maxretries=int(settings["PORTAGE_RSYNC_RETRIES"])
12995                 except SystemExit, e:
12996                         raise # Needed else can't exit
12997                 except:
12998                         maxretries=3 #default number of retries
12999
13000                 retries=0
13001                 user_name, hostname, port = re.split(
13002                         "rsync://([^:/]+@)?([^:/]*)(:[0-9]+)?", syncuri, maxsplit=3)[1:4]
13003                 if port is None:
13004                         port=""
13005                 if user_name is None:
13006                         user_name=""
13007                 updatecache_flg=True
13008                 all_rsync_opts = set(rsync_opts)
13009                 extra_rsync_opts = shlex.split(
13010                         settings.get("PORTAGE_RSYNC_EXTRA_OPTS",""))
13011                 all_rsync_opts.update(extra_rsync_opts)
13012                 family = socket.AF_INET
13013                 if "-4" in all_rsync_opts or "--ipv4" in all_rsync_opts:
13014                         family = socket.AF_INET
13015                 elif socket.has_ipv6 and \
13016                         ("-6" in all_rsync_opts or "--ipv6" in all_rsync_opts):
13017                         family = socket.AF_INET6
13018                 ips=[]
13019                 SERVER_OUT_OF_DATE = -1
13020                 EXCEEDED_MAX_RETRIES = -2
13021                 while (1):
13022                         if ips:
13023                                 del ips[0]
13024                         if ips==[]:
13025                                 try:
13026                                         for addrinfo in socket.getaddrinfo(
13027                                                 hostname, None, family, socket.SOCK_STREAM):
13028                                                 if socket.has_ipv6 and addrinfo[0] == socket.AF_INET6:
13029                                                         # IPv6 addresses need to be enclosed in square brackets
13030                                                         ips.append("[%s]" % addrinfo[4][0])
13031                                                 else:
13032                                                         ips.append(addrinfo[4][0])
13033                                         from random import shuffle
13034                                         shuffle(ips)
13035                                 except SystemExit, e:
13036                                         raise # Needed else can't exit
13037                                 except Exception, e:
13038                                         print "Notice:",str(e)
13039                                         dosyncuri=syncuri
13040
13041                         if ips:
13042                                 try:
13043                                         dosyncuri = syncuri.replace(
13044                                                 "//" + user_name + hostname + port + "/",
13045                                                 "//" + user_name + ips[0] + port + "/", 1)
13046                                 except SystemExit, e:
13047                                         raise # Needed else can't exit
13048                                 except Exception, e:
13049                                         print "Notice:",str(e)
13050                                         dosyncuri=syncuri
13051
13052                         if (retries==0):
13053                                 if "--ask" in myopts:
13054                                         if userquery("Do you want to sync your Portage tree with the mirror at\n" + blue(dosyncuri) + bold("?"))=="No":
13055                                                 print
13056                                                 print "Quitting."
13057                                                 print
13058                                                 sys.exit(0)
13059                                 emergelog(xterm_titles, ">>> Starting rsync with " + dosyncuri)
13060                                 if "--quiet" not in myopts:
13061                                         print ">>> Starting rsync with "+dosyncuri+"..."
13062                         else:
13063                                 emergelog(xterm_titles,
13064                                         ">>> Starting retry %d of %d with %s" % \
13065                                                 (retries,maxretries,dosyncuri))
13066                                 print "\n\n>>> Starting retry %d of %d with %s" % (retries,maxretries,dosyncuri)
13067
13068                         if mytimestamp != 0 and "--quiet" not in myopts:
13069                                 print ">>> Checking server timestamp ..."
13070
13071                         rsynccommand = ["/usr/bin/rsync"] + rsync_opts + extra_rsync_opts
13072
13073                         if "--debug" in myopts:
13074                                 print rsynccommand
13075
13076                         exitcode = os.EX_OK
13077                         servertimestamp = 0
13078                         # Even if there's no timestamp available locally, fetch the
13079                         # timestamp anyway as an initial probe to verify that the server is
13080                         # responsive.  This protects us from hanging indefinitely on a
13081                         # connection attempt to an unresponsive server which rsync's
13082                         # --timeout option does not prevent.
13083                         if True:
13084                                 # Temporary file for remote server timestamp comparison.
13085                                 from tempfile import mkstemp
13086                                 fd, tmpservertimestampfile = mkstemp()
13087                                 os.close(fd)
13088                                 mycommand = rsynccommand[:]
13089                                 mycommand.append(dosyncuri.rstrip("/") + \
13090                                         "/metadata/timestamp.chk")
13091                                 mycommand.append(tmpservertimestampfile)
13092                                 content = None
13093                                 mypids = []
13094                                 try:
13095                                         def timeout_handler(signum, frame):
13096                                                 raise portage.exception.PortageException("timed out")
13097                                         signal.signal(signal.SIGALRM, timeout_handler)
13098                                         # Timeout here in case the server is unresponsive.  The
13099                                         # --timeout rsync option doesn't apply to the initial
13100                                         # connection attempt.
13101                                         if rsync_initial_timeout:
13102                                                 signal.alarm(rsync_initial_timeout)
13103                                         try:
13104                                                 mypids.extend(portage.process.spawn(
13105                                                         mycommand, env=settings.environ(), returnpid=True))
13106                                                 exitcode = os.waitpid(mypids[0], 0)[1]
13107                                                 content = portage.grabfile(tmpservertimestampfile)
13108                                         finally:
13109                                                 if rsync_initial_timeout:
13110                                                         signal.alarm(0)
13111                                                 try:
13112                                                         os.unlink(tmpservertimestampfile)
13113                                                 except OSError:
13114                                                         pass
13115                                 except portage.exception.PortageException, e:
13116                                         # timed out
13117                                         print e
13118                                         del e
13119                                         if mypids and os.waitpid(mypids[0], os.WNOHANG) == (0,0):
13120                                                 os.kill(mypids[0], signal.SIGTERM)
13121                                                 os.waitpid(mypids[0], 0)
13122                                         # This is the same code rsync uses for timeout.
13123                                         exitcode = 30
13124                                 else:
13125                                         if exitcode != os.EX_OK:
13126                                                 if exitcode & 0xff:
13127                                                         exitcode = (exitcode & 0xff) << 8
13128                                                 else:
13129                                                         exitcode = exitcode >> 8
13130                                 if mypids:
13131                                         portage.process.spawned_pids.remove(mypids[0])
13132                                 if content:
13133                                         try:
13134                                                 servertimestamp = time.mktime(time.strptime(
13135                                                         content[0], "%a, %d %b %Y %H:%M:%S +0000"))
13136                                         except (OverflowError, ValueError):
13137                                                 pass
13138                                 del mycommand, mypids, content
13139                         if exitcode == os.EX_OK:
13140                                 if (servertimestamp != 0) and (servertimestamp == mytimestamp):
13141                                         emergelog(xterm_titles,
13142                                                 ">>> Cancelling sync -- Already current.")
13143                                         print
13144                                         print ">>>"
13145                                         print ">>> Timestamps on the server and in the local repository are the same."
13146                                         print ">>> Cancelling all further sync action. You are already up to date."
13147                                         print ">>>"
13148                                         print ">>> In order to force sync, remove '%s'." % servertimestampfile
13149                                         print ">>>"
13150                                         print
13151                                         sys.exit(0)
13152                                 elif (servertimestamp != 0) and (servertimestamp < mytimestamp):
13153                                         emergelog(xterm_titles,
13154                                                 ">>> Server out of date: %s" % dosyncuri)
13155                                         print
13156                                         print ">>>"
13157                                         print ">>> SERVER OUT OF DATE: %s" % dosyncuri
13158                                         print ">>>"
13159                                         print ">>> In order to force sync, remove '%s'." % servertimestampfile
13160                                         print ">>>"
13161                                         print
13162                                         exitcode = SERVER_OUT_OF_DATE
13163                                 elif (servertimestamp == 0) or (servertimestamp > mytimestamp):
13164                                         # actual sync
13165                                         mycommand = rsynccommand + [dosyncuri+"/", myportdir]
13166                                         exitcode = portage.process.spawn(mycommand, **spawn_kwargs)
13167                                         if exitcode in [0,1,3,4,11,14,20,21]:
13168                                                 break
13169                         elif exitcode in [1,3,4,11,14,20,21]:
13170                                 break
13171                         else:
13172                                 # Code 2 indicates protocol incompatibility, which is expected
13173                                 # for servers with protocol < 29 that don't support
13174                                 # --prune-empty-directories.  Retry for a server that supports
13175                                 # at least rsync protocol version 29 (>=rsync-2.6.4).
13176                                 pass
13177
13178                         retries=retries+1
13179
13180                         if retries<=maxretries:
13181                                 print ">>> Retrying..."
13182                                 time.sleep(11)
13183                         else:
13184                                 # over retries
13185                                 # exit loop
13186                                 updatecache_flg=False
13187                                 exitcode = EXCEEDED_MAX_RETRIES
13188                                 break
13189
13190                 if (exitcode==0):
13191                         emergelog(xterm_titles, "=== Sync completed with %s" % dosyncuri)
13192                 elif exitcode == SERVER_OUT_OF_DATE:
13193                         sys.exit(1)
13194                 elif exitcode == EXCEEDED_MAX_RETRIES:
13195                         sys.stderr.write(
13196                                 ">>> Exceeded PORTAGE_RSYNC_RETRIES: %s\n" % maxretries)
13197                         sys.exit(1)
13198                 elif (exitcode>0):
13199                         msg = []
13200                         if exitcode==1:
13201                                 msg.append("Rsync has reported that there is a syntax error. Please ensure")
13202                                 msg.append("that your SYNC statement is proper.")
13203                                 msg.append("SYNC=" + settings["SYNC"])
13204                         elif exitcode==11:
13205                                 msg.append("Rsync has reported that there is a File IO error. Normally")
13206                                 msg.append("this means your disk is full, but can be caused by corruption")
13207                                 msg.append("on the filesystem that contains PORTDIR. Please investigate")
13208                                 msg.append("and try again after the problem has been fixed.")
13209                                 msg.append("PORTDIR=" + settings["PORTDIR"])
13210                         elif exitcode==20:
13211                                 msg.append("Rsync was killed before it finished.")
13212                         else:
13213                                 msg.append("Rsync has not successfully finished. It is recommended that you keep")
13214                                 msg.append("trying or that you use the 'emerge-webrsync' option if you are unable")
13215                                 msg.append("to use rsync due to firewall or other restrictions. This should be a")
13216                                 msg.append("temporary problem unless complications exist with your network")
13217                                 msg.append("(and possibly your system's filesystem) configuration.")
13218                         for line in msg:
13219                                 out.eerror(line)
13220                         sys.exit(exitcode)
13221         elif syncuri[:6]=="cvs://":
13222                 if not os.path.exists("/usr/bin/cvs"):
13223                         print "!!! /usr/bin/cvs does not exist, so CVS support is disabled."
13224                         print "!!! Type \"emerge dev-util/cvs\" to enable CVS support."
13225                         sys.exit(1)
13226                 cvsroot=syncuri[6:]
13227                 cvsdir=os.path.dirname(myportdir)
13228                 if not os.path.exists(myportdir+"/CVS"):
13229                         #initial checkout
13230                         print ">>> Starting initial cvs checkout with "+syncuri+"..."
13231                         if os.path.exists(cvsdir+"/gentoo-x86"):
13232                                 print "!!! existing",cvsdir+"/gentoo-x86 directory; exiting."
13233                                 sys.exit(1)
13234                         try:
13235                                 os.rmdir(myportdir)
13236                         except OSError, e:
13237                                 if e.errno != errno.ENOENT:
13238                                         sys.stderr.write(
13239                                                 "!!! existing '%s' directory; exiting.\n" % myportdir)
13240                                         sys.exit(1)
13241                                 del e
13242                         if portage.spawn("cd "+cvsdir+"; cvs -z0 -d "+cvsroot+" co -P gentoo-x86",settings,free=1):
13243                                 print "!!! cvs checkout error; exiting."
13244                                 sys.exit(1)
13245                         os.rename(os.path.join(cvsdir, "gentoo-x86"), myportdir)
13246                 else:
13247                         #cvs update
13248                         print ">>> Starting cvs update with "+syncuri+"..."
13249                         retval = portage.process.spawn_bash(
13250                                 "cd %s; cvs -z0 -q update -dP" % \
13251                                 (portage._shell_quote(myportdir),), **spawn_kwargs)
13252                         if retval != os.EX_OK:
13253                                 sys.exit(retval)
13254                 dosyncuri = syncuri
13255         else:
13256                 writemsg_level("!!! Unrecognized protocol: SYNC='%s'\n" % (syncuri,),
13257                         noiselevel=-1, level=logging.ERROR)
13258                 return 1
13259
13260         if updatecache_flg and  \
13261                 myaction != "metadata" and \
13262                 "metadata-transfer" not in settings.features:
13263                 updatecache_flg = False
13264
13265         # Reload the whole config from scratch.
13266         settings, trees, mtimedb = load_emerge_config(trees=trees)
13267         root_config = trees[settings["ROOT"]]["root_config"]
13268         portdb = trees[settings["ROOT"]]["porttree"].dbapi
13269
13270         if updatecache_flg and \
13271                 os.path.exists(os.path.join(myportdir, 'metadata', 'cache')):
13272
13273                 # Only update cache for myportdir since that's
13274                 # the only one that's been synced here.
13275                 action_metadata(settings, portdb, myopts, porttrees=[myportdir])
13276
13277         if portage._global_updates(trees, mtimedb["updates"]):
13278                 mtimedb.commit()
13279                 # Reload the whole config from scratch.
13280                 settings, trees, mtimedb = load_emerge_config(trees=trees)
13281                 portdb = trees[settings["ROOT"]]["porttree"].dbapi
13282                 root_config = trees[settings["ROOT"]]["root_config"]
13283
13284         mybestpv = portdb.xmatch("bestmatch-visible",
13285                 portage.const.PORTAGE_PACKAGE_ATOM)
13286         mypvs = portage.best(
13287                 trees[settings["ROOT"]]["vartree"].dbapi.match(
13288                 portage.const.PORTAGE_PACKAGE_ATOM))
13289
13290         chk_updated_cfg_files("/", settings.get("CONFIG_PROTECT","").split())
13291
13292         if myaction != "metadata":
13293                 if os.access(portage.USER_CONFIG_PATH + "/bin/post_sync", os.X_OK):
13294                         retval = portage.process.spawn(
13295                                 [os.path.join(portage.USER_CONFIG_PATH, "bin", "post_sync"),
13296                                 dosyncuri], env=settings.environ())
13297                         if retval != os.EX_OK:
13298                                 print red(" * ")+bold("spawn failed of "+ portage.USER_CONFIG_PATH + "/bin/post_sync")
13299
13300         if(mybestpv != mypvs) and not "--quiet" in myopts:
13301                 print
13302                 print red(" * ")+bold("An update to portage is available.")+" It is _highly_ recommended"
13303                 print red(" * ")+"that you update portage now, before any other packages are updated."
13304                 print
13305                 print red(" * ")+"To update portage, run 'emerge portage' now."
13306                 print
13307         
13308         display_news_notification(root_config, myopts)
13309         return os.EX_OK
13310
13311 def git_sync_timestamps(settings, portdir):
13312         """
13313         Since git doesn't preserve timestamps, synchronize timestamps between
13314         entries and ebuilds/eclasses. Assume the cache has the correct timestamp
13315         for a given file as long as the file in the working tree is not modified
13316         (relative to HEAD).
13317         """
13318         cache_dir = os.path.join(portdir, "metadata", "cache")
13319         if not os.path.isdir(cache_dir):
13320                 return os.EX_OK
13321         writemsg_level(">>> Synchronizing timestamps...\n")
13322
13323         from portage.cache.cache_errors import CacheError
13324         try:
13325                 cache_db = settings.load_best_module("portdbapi.metadbmodule")(
13326                         portdir, "metadata/cache", portage.auxdbkeys[:], readonly=True)
13327         except CacheError, e:
13328                 writemsg_level("!!! Unable to instantiate cache: %s\n" % (e,),
13329                         level=logging.ERROR, noiselevel=-1)
13330                 return 1
13331
13332         ec_dir = os.path.join(portdir, "eclass")
13333         try:
13334                 ec_names = set(f[:-7] for f in os.listdir(ec_dir) \
13335                         if f.endswith(".eclass"))
13336         except OSError, e:
13337                 writemsg_level("!!! Unable to list eclasses: %s\n" % (e,),
13338                         level=logging.ERROR, noiselevel=-1)
13339                 return 1
13340
13341         args = [portage.const.BASH_BINARY, "-c",
13342                 "cd %s && git diff-index --name-only --diff-filter=M HEAD" % \
13343                 portage._shell_quote(portdir)]
13344         import subprocess
13345         proc = subprocess.Popen(args, stdout=subprocess.PIPE)
13346         modified_files = set(l.rstrip("\n") for l in proc.stdout)
13347         rval = proc.wait()
13348         if rval != os.EX_OK:
13349                 return rval
13350
13351         modified_eclasses = set(ec for ec in ec_names \
13352                 if os.path.join("eclass", ec + ".eclass") in modified_files)
13353
13354         updated_ec_mtimes = {}
13355
13356         for cpv in cache_db:
13357                 cpv_split = portage.catpkgsplit(cpv)
13358                 if cpv_split is None:
13359                         writemsg_level("!!! Invalid cache entry: %s\n" % (cpv,),
13360                                 level=logging.ERROR, noiselevel=-1)
13361                         continue
13362
13363                 cat, pn, ver, rev = cpv_split
13364                 cat, pf = portage.catsplit(cpv)
13365                 relative_eb_path = os.path.join(cat, pn, pf + ".ebuild")
13366                 if relative_eb_path in modified_files:
13367                         continue
13368
13369                 try:
13370                         cache_entry = cache_db[cpv]
13371                         eb_mtime = cache_entry.get("_mtime_")
13372                         ec_mtimes = cache_entry.get("_eclasses_")
13373                 except KeyError:
13374                         writemsg_level("!!! Missing cache entry: %s\n" % (cpv,),
13375                                 level=logging.ERROR, noiselevel=-1)
13376                         continue
13377                 except CacheError, e:
13378                         writemsg_level("!!! Unable to access cache entry: %s %s\n" % \
13379                                 (cpv, e), level=logging.ERROR, noiselevel=-1)
13380                         continue
13381
13382                 if eb_mtime is None:
13383                         writemsg_level("!!! Missing ebuild mtime: %s\n" % (cpv,),
13384                                 level=logging.ERROR, noiselevel=-1)
13385                         continue
13386
13387                 try:
13388                         eb_mtime = long(eb_mtime)
13389                 except ValueError:
13390                         writemsg_level("!!! Invalid ebuild mtime: %s %s\n" % \
13391                                 (cpv, eb_mtime), level=logging.ERROR, noiselevel=-1)
13392                         continue
13393
13394                 if ec_mtimes is None:
13395                         writemsg_level("!!! Missing eclass mtimes: %s\n" % (cpv,),
13396                                 level=logging.ERROR, noiselevel=-1)
13397                         continue
13398
13399                 if modified_eclasses.intersection(ec_mtimes):
13400                         continue
13401
13402                 missing_eclasses = set(ec_mtimes).difference(ec_names)
13403                 if missing_eclasses:
13404                         writemsg_level("!!! Non-existent eclass(es): %s %s\n" % \
13405                                 (cpv, sorted(missing_eclasses)), level=logging.ERROR,
13406                                 noiselevel=-1)
13407                         continue
13408
13409                 eb_path = os.path.join(portdir, relative_eb_path)
13410                 try:
13411                         current_eb_mtime = os.stat(eb_path)
13412                 except OSError:
13413                         writemsg_level("!!! Missing ebuild: %s\n" % \
13414                                 (cpv,), level=logging.ERROR, noiselevel=-1)
13415                         continue
13416
13417                 inconsistent = False
13418                 for ec, (ec_path, ec_mtime) in ec_mtimes.iteritems():
13419                         updated_mtime = updated_ec_mtimes.get(ec)
13420                         if updated_mtime is not None and updated_mtime != ec_mtime:
13421                                 writemsg_level("!!! Inconsistent eclass mtime: %s %s\n" % \
13422                                         (cpv, ec), level=logging.ERROR, noiselevel=-1)
13423                                 inconsistent = True
13424                                 break
13425
13426                 if inconsistent:
13427                         continue
13428
13429                 if current_eb_mtime != eb_mtime:
13430                         os.utime(eb_path, (eb_mtime, eb_mtime))
13431
13432                 for ec, (ec_path, ec_mtime) in ec_mtimes.iteritems():
13433                         if ec in updated_ec_mtimes:
13434                                 continue
13435                         ec_path = os.path.join(ec_dir, ec + ".eclass")
13436                         current_mtime = long(os.stat(ec_path).st_mtime)
13437                         if current_mtime != ec_mtime:
13438                                 os.utime(ec_path, (ec_mtime, ec_mtime))
13439                         updated_ec_mtimes[ec] = ec_mtime
13440
13441         return os.EX_OK
13442
13443 def action_metadata(settings, portdb, myopts, porttrees=None):
13444         if porttrees is None:
13445                 porttrees = portdb.porttrees
13446         portage.writemsg_stdout("\n>>> Updating Portage cache\n")
13447         old_umask = os.umask(0002)
13448         cachedir = os.path.normpath(settings.depcachedir)
13449         if cachedir in ["/",    "/bin", "/dev",  "/etc",  "/home",
13450                                         "/lib", "/opt", "/proc", "/root", "/sbin",
13451                                         "/sys", "/tmp", "/usr",  "/var"]:
13452                 print >> sys.stderr, "!!! PORTAGE_DEPCACHEDIR IS SET TO A PRIMARY " + \
13453                         "ROOT DIRECTORY ON YOUR SYSTEM."
13454                 print >> sys.stderr, \
13455                         "!!! This is ALMOST CERTAINLY NOT what you want: '%s'" % cachedir
13456                 sys.exit(73)
13457         if not os.path.exists(cachedir):
13458                 os.makedirs(cachedir)
13459
13460         auxdbkeys = [x for x in portage.auxdbkeys if not x.startswith("UNUSED_0")]
13461         auxdbkeys = tuple(auxdbkeys)
13462
13463         class TreeData(object):
13464                 __slots__ = ('dest_db', 'eclass_db', 'path', 'src_db', 'valid_nodes')
13465                 def __init__(self, dest_db, eclass_db, path, src_db):
13466                         self.dest_db = dest_db
13467                         self.eclass_db = eclass_db
13468                         self.path = path
13469                         self.src_db = src_db
13470                         self.valid_nodes = set()
13471
13472         porttrees_data = []
13473         for path in porttrees:
13474                 src_db = portdb._pregen_auxdb.get(path)
13475                 if src_db is None and \
13476                         os.path.isdir(os.path.join(path, 'metadata', 'cache')):
13477                         src_db = portdb.metadbmodule(
13478                                 path, 'metadata/cache', auxdbkeys, readonly=True)
13479                         try:
13480                                 src_db.ec = portdb._repo_info[path].eclass_db
13481                         except AttributeError:
13482                                 pass
13483
13484                 if src_db is not None:
13485                         porttrees_data.append(TreeData(portdb.auxdb[path],
13486                                 portdb._repo_info[path].eclass_db, path, src_db))
13487
13488         porttrees = [tree_data.path for tree_data in porttrees_data]
13489
13490         isatty = sys.stdout.isatty()
13491         quiet = not isatty or '--quiet' in myopts
13492         onProgress = None
13493         if not quiet:
13494                 progressBar = portage.output.TermProgressBar()
13495                 progressHandler = ProgressHandler()
13496                 onProgress = progressHandler.onProgress
13497                 def display():
13498                         progressBar.set(progressHandler.curval, progressHandler.maxval)
13499                 progressHandler.display = display
13500                 def sigwinch_handler(signum, frame):
13501                         lines, progressBar.term_columns = \
13502                                 portage.output.get_term_size()
13503                 signal.signal(signal.SIGWINCH, sigwinch_handler)
13504
13505         # Temporarily override portdb.porttrees so portdb.cp_all()
13506         # will only return the relevant subset.
13507         portdb_porttrees = portdb.porttrees
13508         portdb.porttrees = porttrees
13509         try:
13510                 cp_all = portdb.cp_all()
13511         finally:
13512                 portdb.porttrees = portdb_porttrees
13513
13514         curval = 0
13515         maxval = len(cp_all)
13516         if onProgress is not None:
13517                 onProgress(maxval, curval)
13518
13519         from portage.cache.util import quiet_mirroring
13520         from portage import eapi_is_supported, \
13521                 _validate_cache_for_unsupported_eapis
13522
13523         # TODO: Display error messages, but do not interfere with the progress bar.
13524         # Here's how:
13525         #  1) erase the progress bar
13526         #  2) show the error message
13527         #  3) redraw the progress bar on a new line
13528         noise = quiet_mirroring()
13529
13530         for cp in cp_all:
13531                 for tree_data in porttrees_data:
13532                         for cpv in portdb.cp_list(cp, mytree=tree_data.path):
13533                                 tree_data.valid_nodes.add(cpv)
13534                                 try:
13535                                         src = tree_data.src_db[cpv]
13536                                 except KeyError, e:
13537                                         noise.missing_entry(cpv)
13538                                         del e
13539                                         continue
13540                                 except CacheError, ce:
13541                                         noise.exception(cpv, ce)
13542                                         del ce
13543                                         continue
13544
13545                                 eapi = src.get('EAPI')
13546                                 if not eapi:
13547                                         eapi = '0'
13548                                 eapi = eapi.lstrip('-')
13549                                 eapi_supported = eapi_is_supported(eapi)
13550                                 if not eapi_supported:
13551                                         if not _validate_cache_for_unsupported_eapis:
13552                                                 noise.misc(cpv, "unable to validate " + \
13553                                                         "cache for EAPI='%s'" % eapi)
13554                                                 continue
13555
13556                                 dest = None
13557                                 try:
13558                                         dest = tree_data.dest_db[cpv]
13559                                 except (KeyError, CacheError):
13560                                         pass
13561
13562                                 for d in (src, dest):
13563                                         if d is not None and d.get('EAPI') in ('', '0'):
13564                                                 del d['EAPI']
13565
13566                                 if dest is not None:
13567                                         if not (dest['_mtime_'] == src['_mtime_'] and \
13568                                                 tree_data.eclass_db.is_eclass_data_valid(
13569                                                         dest['_eclasses_']) and \
13570                                                 set(dest['_eclasses_']) == set(src['_eclasses_'])):
13571                                                 dest = None
13572                                         else:
13573                                                 # We don't want to skip the write unless we're really
13574                                                 # sure that the existing cache is identical, so don't
13575                                                 # trust _mtime_ and _eclasses_ alone.
13576                                                 for k in set(chain(src, dest)).difference(
13577                                                         ('_mtime_', '_eclasses_')):
13578                                                         if dest.get(k, '') != src.get(k, ''):
13579                                                                 dest = None
13580                                                                 break
13581
13582                                 if dest is not None:
13583                                         # The existing data is valid and identical,
13584                                         # so there's no need to overwrite it.
13585                                         continue
13586
13587                                 try:
13588                                         inherited = src.get('INHERITED', '')
13589                                         eclasses = src.get('_eclasses_')
13590                                 except CacheError, ce:
13591                                         noise.exception(cpv, ce)
13592                                         del ce
13593                                         continue
13594
13595                                 if eclasses is not None:
13596                                         if not tree_data.eclass_db.is_eclass_data_valid(
13597                                                 src['_eclasses_']):
13598                                                 noise.eclass_stale(cpv)
13599                                                 continue
13600                                         inherited = eclasses
13601                                 else:
13602                                         inherited = inherited.split()
13603
13604                                 if tree_data.src_db.complete_eclass_entries and \
13605                                         eclasses is None:
13606                                         noise.corruption(cpv, "missing _eclasses_ field")
13607                                         continue
13608
13609                                 if inherited:
13610                                         # Even if _eclasses_ already exists, replace it with data from
13611                                         # eclass_cache, in order to insert local eclass paths.
13612                                         try:
13613                                                 eclasses = tree_data.eclass_db.get_eclass_data(inherited)
13614                                         except KeyError:
13615                                                 # INHERITED contains a non-existent eclass.
13616                                                 noise.eclass_stale(cpv)
13617                                                 continue
13618
13619                                         if eclasses is None:
13620                                                 noise.eclass_stale(cpv)
13621                                                 continue
13622                                         src['_eclasses_'] = eclasses
13623                                 else:
13624                                         src['_eclasses_'] = {}
13625
13626                                 if not eapi_supported:
13627                                         src = {
13628                                                 'EAPI'       : '-' + eapi,
13629                                                 '_mtime_'    : src['_mtime_'],
13630                                                 '_eclasses_' : src['_eclasses_'],
13631                                         }
13632
13633                                 try:
13634                                         tree_data.dest_db[cpv] = src
13635                                 except CacheError, ce:
13636                                         noise.exception(cpv, ce)
13637                                         del ce
13638
13639                 curval += 1
13640                 if onProgress is not None:
13641                         onProgress(maxval, curval)
13642
13643         if onProgress is not None:
13644                 onProgress(maxval, curval)
13645
13646         for tree_data in porttrees_data:
13647                 try:
13648                         dead_nodes = set(tree_data.dest_db.iterkeys())
13649                 except CacheError, e:
13650                         writemsg_level("Error listing cache entries for " + \
13651                                 "'%s': %s, continuing...\n" % (tree_data.path, e),
13652                                 level=logging.ERROR, noiselevel=-1)
13653                         del e
13654                 else:
13655                         dead_nodes.difference_update(tree_data.valid_nodes)
13656                         for cpv in dead_nodes:
13657                                 try:
13658                                         del tree_data.dest_db[cpv]
13659                                 except (KeyError, CacheError):
13660                                         pass
13661
13662         if not quiet:
13663                 # make sure the final progress is displayed
13664                 progressHandler.display()
13665                 print
13666                 signal.signal(signal.SIGWINCH, signal.SIG_DFL)
13667
13668         sys.stdout.flush()
13669         os.umask(old_umask)
13670
13671 def action_regen(settings, portdb, max_jobs, max_load):
13672         xterm_titles = "notitles" not in settings.features
13673         emergelog(xterm_titles, " === regen")
13674         #regenerate cache entries
13675         portage.writemsg_stdout("Regenerating cache entries...\n")
13676         try:
13677                 os.close(sys.stdin.fileno())
13678         except SystemExit, e:
13679                 raise # Needed else can't exit
13680         except:
13681                 pass
13682         sys.stdout.flush()
13683
13684         regen = MetadataRegen(portdb, max_jobs=max_jobs, max_load=max_load)
13685         regen.run()
13686
13687         portage.writemsg_stdout("done!\n")
13688         return regen.returncode
13689
13690 def action_config(settings, trees, myopts, myfiles):
13691         if len(myfiles) != 1:
13692                 print red("!!! config can only take a single package atom at this time\n")
13693                 sys.exit(1)
13694         if not is_valid_package_atom(myfiles[0]):
13695                 portage.writemsg("!!! '%s' is not a valid package atom.\n" % myfiles[0],
13696                         noiselevel=-1)
13697                 portage.writemsg("!!! Please check ebuild(5) for full details.\n")
13698                 portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
13699                 sys.exit(1)
13700         print
13701         try:
13702                 pkgs = trees[settings["ROOT"]]["vartree"].dbapi.match(myfiles[0])
13703         except portage.exception.AmbiguousPackageName, e:
13704                 # Multiple matches thrown from cpv_expand
13705                 pkgs = e.args[0]
13706         if len(pkgs) == 0:
13707                 print "No packages found.\n"
13708                 sys.exit(0)
13709         elif len(pkgs) > 1:
13710                 if "--ask" in myopts:
13711                         options = []
13712                         print "Please select a package to configure:"
13713                         idx = 0
13714                         for pkg in pkgs:
13715                                 idx += 1
13716                                 options.append(str(idx))
13717                                 print options[-1]+") "+pkg
13718                         print "X) Cancel"
13719                         options.append("X")
13720                         idx = userquery("Selection?", options)
13721                         if idx == "X":
13722                                 sys.exit(0)
13723                         pkg = pkgs[int(idx)-1]
13724                 else:
13725                         print "The following packages available:"
13726                         for pkg in pkgs:
13727                                 print "* "+pkg
13728                         print "\nPlease use a specific atom or the --ask option."
13729                         sys.exit(1)
13730         else:
13731                 pkg = pkgs[0]
13732
13733         print
13734         if "--ask" in myopts:
13735                 if userquery("Ready to configure "+pkg+"?") == "No":
13736                         sys.exit(0)
13737         else:
13738                 print "Configuring pkg..."
13739         print
13740         ebuildpath = trees[settings["ROOT"]]["vartree"].dbapi.findname(pkg)
13741         mysettings = portage.config(clone=settings)
13742         vardb = trees[mysettings["ROOT"]]["vartree"].dbapi
13743         debug = mysettings.get("PORTAGE_DEBUG") == "1"
13744         retval = portage.doebuild(ebuildpath, "config", mysettings["ROOT"],
13745                 mysettings,
13746                 debug=(settings.get("PORTAGE_DEBUG", "") == 1), cleanup=True,
13747                 mydbapi=trees[settings["ROOT"]]["vartree"].dbapi, tree="vartree")
13748         if retval == os.EX_OK:
13749                 portage.doebuild(ebuildpath, "clean", mysettings["ROOT"],
13750                         mysettings, debug=debug, mydbapi=vardb, tree="vartree")
13751         print
13752
13753 def action_info(settings, trees, myopts, myfiles):
13754         print getportageversion(settings["PORTDIR"], settings["ROOT"],
13755                 settings.profile_path, settings["CHOST"],
13756                 trees[settings["ROOT"]]["vartree"].dbapi)
13757         header_width = 65
13758         header_title = "System Settings"
13759         if myfiles:
13760                 print header_width * "="
13761                 print header_title.rjust(int(header_width/2 + len(header_title)/2))
13762         print header_width * "="
13763         print "System uname: "+platform.platform(aliased=1)
13764
13765         lastSync = portage.grabfile(os.path.join(
13766                 settings["PORTDIR"], "metadata", "timestamp.chk"))
13767         print "Timestamp of tree:",
13768         if lastSync:
13769                 print lastSync[0]
13770         else:
13771                 print "Unknown"
13772
13773         output=commands.getstatusoutput("distcc --version")
13774         if not output[0]:
13775                 print str(output[1].split("\n",1)[0]),
13776                 if "distcc" in settings.features:
13777                         print "[enabled]"
13778                 else:
13779                         print "[disabled]"
13780
13781         output=commands.getstatusoutput("ccache -V")
13782         if not output[0]:
13783                 print str(output[1].split("\n",1)[0]),
13784                 if "ccache" in settings.features:
13785                         print "[enabled]"
13786                 else:
13787                         print "[disabled]"
13788
13789         myvars  = ["sys-devel/autoconf", "sys-devel/automake", "virtual/os-headers",
13790                    "sys-devel/binutils", "sys-devel/libtool",  "dev-lang/python"]
13791         myvars += portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_pkgs")
13792         myvars  = portage.util.unique_array(myvars)
13793         myvars.sort()
13794
13795         for x in myvars:
13796                 if portage.isvalidatom(x):
13797                         pkg_matches = trees["/"]["vartree"].dbapi.match(x)
13798                         pkg_matches = [portage.catpkgsplit(cpv)[1:] for cpv in pkg_matches]
13799                         pkg_matches.sort(key=cmp_sort_key(portage.pkgcmp))
13800                         pkgs = []
13801                         for pn, ver, rev in pkg_matches:
13802                                 if rev != "r0":
13803                                         pkgs.append(ver + "-" + rev)
13804                                 else:
13805                                         pkgs.append(ver)
13806                         if pkgs:
13807                                 pkgs = ", ".join(pkgs)
13808                                 print "%-20s %s" % (x+":", pkgs)
13809                 else:
13810                         print "%-20s %s" % (x+":", "[NOT VALID]")
13811
13812         libtool_vers = ",".join(trees["/"]["vartree"].dbapi.match("sys-devel/libtool"))
13813
13814         if "--verbose" in myopts:
13815                 myvars=settings.keys()
13816         else:
13817                 myvars = ['GENTOO_MIRRORS', 'CONFIG_PROTECT', 'CONFIG_PROTECT_MASK',
13818                           'PORTDIR', 'DISTDIR', 'PKGDIR', 'PORTAGE_TMPDIR',
13819                           'PORTDIR_OVERLAY', 'USE', 'CHOST', 'CFLAGS', 'CXXFLAGS',
13820                           'ACCEPT_KEYWORDS', 'SYNC', 'FEATURES', 'EMERGE_DEFAULT_OPTS']
13821
13822                 myvars.extend(portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_vars"))
13823
13824         myvars = portage.util.unique_array(myvars)
13825         use_expand = settings.get('USE_EXPAND', '').split()
13826         use_expand.sort()
13827         use_expand_hidden = set(
13828                 settings.get('USE_EXPAND_HIDDEN', '').upper().split())
13829         alphabetical_use = '--alphabetical' in myopts
13830         root_config = trees[settings["ROOT"]]['root_config']
13831         unset_vars = []
13832         myvars.sort()
13833         for x in myvars:
13834                 if x in settings:
13835                         if x != "USE":
13836                                 print '%s="%s"' % (x, settings[x])
13837                         else:
13838                                 use = set(settings["USE"].split())
13839                                 for varname in use_expand:
13840                                         flag_prefix = varname.lower() + "_"
13841                                         for f in list(use):
13842                                                 if f.startswith(flag_prefix):
13843                                                         use.remove(f)
13844                                 use = list(use)
13845                                 use.sort()
13846                                 print 'USE="%s"' % " ".join(use),
13847                                 for varname in use_expand:
13848                                         myval = settings.get(varname)
13849                                         if myval:
13850                                                 print '%s="%s"' % (varname, myval),
13851                                 print
13852                 else:
13853                         unset_vars.append(x)
13854         if unset_vars:
13855                 print "Unset:  "+", ".join(unset_vars)
13856         print
13857
13858         if "--debug" in myopts:
13859                 for x in dir(portage):
13860                         module = getattr(portage, x)
13861                         if "cvs_id_string" in dir(module):
13862                                 print "%s: %s" % (str(x), str(module.cvs_id_string))
13863
13864         # See if we can find any packages installed matching the strings
13865         # passed on the command line
13866         mypkgs = []
13867         vardb = trees[settings["ROOT"]]["vartree"].dbapi
13868         portdb = trees[settings["ROOT"]]["porttree"].dbapi
13869         for x in myfiles:
13870                 mypkgs.extend(vardb.match(x))
13871
13872         # If some packages were found...
13873         if mypkgs:
13874                 # Get our global settings (we only print stuff if it varies from
13875                 # the current config)
13876                 mydesiredvars = [ 'CHOST', 'CFLAGS', 'CXXFLAGS', 'LDFLAGS' ]
13877                 auxkeys = mydesiredvars + list(vardb._aux_cache_keys)
13878                 auxkeys.append('DEFINED_PHASES')
13879                 global_vals = {}
13880                 pkgsettings = portage.config(clone=settings)
13881
13882                 # Loop through each package
13883                 # Only print settings if they differ from global settings
13884                 header_title = "Package Settings"
13885                 print header_width * "="
13886                 print header_title.rjust(int(header_width/2 + len(header_title)/2))
13887                 print header_width * "="
13888                 from portage.output import EOutput
13889                 out = EOutput()
13890                 for cpv in mypkgs:
13891                         # Get all package specific variables
13892                         metadata = dict(izip(auxkeys, vardb.aux_get(cpv, auxkeys)))
13893                         pkg = Package(built=True, cpv=cpv,
13894                                 installed=True, metadata=izip(Package.metadata_keys,
13895                                 (metadata.get(x, '') for x in Package.metadata_keys)),
13896                                 root_config=root_config, type_name='installed')
13897
13898                         print "\n%s was built with the following:" % \
13899                                 colorize("INFORM", str(pkg.cpv))
13900
13901                         pkgsettings.setcpv(pkg)
13902                         forced_flags = set(chain(pkgsettings.useforce,
13903                                 pkgsettings.usemask))
13904                         use = set(pkg.use.enabled)
13905                         use.discard(pkgsettings.get('ARCH'))
13906                         use_expand_flags = set()
13907                         use_enabled = {}
13908                         use_disabled = {}
13909                         for varname in use_expand:
13910                                 flag_prefix = varname.lower() + "_"
13911                                 for f in use:
13912                                         if f.startswith(flag_prefix):
13913                                                 use_expand_flags.add(f)
13914                                                 use_enabled.setdefault(
13915                                                         varname.upper(), []).append(f[len(flag_prefix):])
13916
13917                                 for f in pkg.iuse.all:
13918                                         if f.startswith(flag_prefix):
13919                                                 use_expand_flags.add(f)
13920                                                 if f not in use:
13921                                                         use_disabled.setdefault(
13922                                                                 varname.upper(), []).append(f[len(flag_prefix):])
13923
13924                         var_order = set(use_enabled)
13925                         var_order.update(use_disabled)
13926                         var_order = sorted(var_order)
13927                         var_order.insert(0, 'USE')
13928                         use.difference_update(use_expand_flags)
13929                         use_enabled['USE'] = list(use)
13930                         use_disabled['USE'] = []
13931
13932                         for f in pkg.iuse.all:
13933                                 if f not in use and \
13934                                         f not in use_expand_flags:
13935                                         use_disabled['USE'].append(f)
13936
13937                         for varname in var_order:
13938                                 if varname in use_expand_hidden:
13939                                         continue
13940                                 flags = []
13941                                 for f in use_enabled.get(varname, []):
13942                                         flags.append(UseFlagDisplay(f, True, f in forced_flags))
13943                                 for f in use_disabled.get(varname, []):
13944                                         flags.append(UseFlagDisplay(f, False, f in forced_flags))
13945                                 if alphabetical_use:
13946                                         flags.sort(key=UseFlagDisplay.sort_combined)
13947                                 else:
13948                                         flags.sort(key=UseFlagDisplay.sort_separated)
13949                                 print '%s="%s"' % (varname, ' '.join(str(f) for f in flags)),
13950                         print
13951
13952                         for myvar in mydesiredvars:
13953                                 if metadata[myvar].split() != settings.get(myvar, '').split():
13954                                         print "%s=\"%s\"" % (myvar, metadata[myvar])
13955                         print
13956
13957                         if metadata['DEFINED_PHASES']:
13958                                 if 'info' not in metadata['DEFINED_PHASES'].split():
13959                                         continue
13960
13961                         print ">>> Attempting to run pkg_info() for '%s'" % pkg.cpv
13962                         ebuildpath = vardb.findname(pkg.cpv)
13963                         if not ebuildpath or not os.path.exists(ebuildpath):
13964                                 out.ewarn("No ebuild found for '%s'" % pkg.cpv)
13965                                 continue
13966                         portage.doebuild(ebuildpath, "info", pkgsettings["ROOT"],
13967                                 pkgsettings, debug=(settings.get("PORTAGE_DEBUG", "") == 1),
13968                                 mydbapi=trees[settings["ROOT"]]["vartree"].dbapi,
13969                                 tree="vartree")
13970
13971 def action_search(root_config, myopts, myfiles, spinner):
13972         if not myfiles:
13973                 print "emerge: no search terms provided."
13974         else:
13975                 searchinstance = search(root_config,
13976                         spinner, "--searchdesc" in myopts,
13977                         "--quiet" not in myopts, "--usepkg" in myopts,
13978                         "--usepkgonly" in myopts)
13979                 for mysearch in myfiles:
13980                         try:
13981                                 searchinstance.execute(mysearch)
13982                         except re.error, comment:
13983                                 print "\n!!! Regular expression error in \"%s\": %s" % ( mysearch, comment )
13984                                 sys.exit(1)
13985                         searchinstance.output()
13986
13987 def action_uninstall(settings, trees, ldpath_mtimes,
13988         opts, action, files, spinner):
13989
13990         # For backward compat, some actions do not require leading '='.
13991         ignore_missing_eq = action in ('clean', 'unmerge')
13992         root = settings['ROOT']
13993         vardb = trees[root]['vartree'].dbapi
13994         valid_atoms = []
13995         lookup_owners = []
13996
13997         # Ensure atoms are valid before calling unmerge().
13998         # For backward compat, leading '=' is not required.
13999         for x in files:
14000                 if is_valid_package_atom(x) or \
14001                         (ignore_missing_eq and is_valid_package_atom('=' + x)):
14002
14003                         try:
14004                                 valid_atoms.append(
14005                                         portage.dep_expand(x, mydb=vardb, settings=settings))
14006                         except portage.exception.AmbiguousPackageName, e:
14007                                 msg = "The short ebuild name \"" + x + \
14008                                         "\" is ambiguous.  Please specify " + \
14009                                         "one of the following " + \
14010                                         "fully-qualified ebuild names instead:"
14011                                 for line in textwrap.wrap(msg, 70):
14012                                         writemsg_level("!!! %s\n" % (line,),
14013                                                 level=logging.ERROR, noiselevel=-1)
14014                                 for i in e[0]:
14015                                         writemsg_level("    %s\n" % colorize("INFORM", i),
14016                                                 level=logging.ERROR, noiselevel=-1)
14017                                 writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
14018                                 return 1
14019
14020                 elif x.startswith(os.sep):
14021                         if not x.startswith(root):
14022                                 writemsg_level(("!!! '%s' does not start with" + \
14023                                         " $ROOT.\n") % x, level=logging.ERROR, noiselevel=-1)
14024                                 return 1
14025                         # Queue these up since it's most efficient to handle
14026                         # multiple files in a single iter_owners() call.
14027                         lookup_owners.append(x)
14028
14029                 else:
14030                         msg = []
14031                         msg.append("'%s' is not a valid package atom." % (x,))
14032                         msg.append("Please check ebuild(5) for full details.")
14033                         writemsg_level("".join("!!! %s\n" % line for line in msg),
14034                                 level=logging.ERROR, noiselevel=-1)
14035                         return 1
14036
14037         if lookup_owners:
14038                 relative_paths = []
14039                 search_for_multiple = False
14040                 if len(lookup_owners) > 1:
14041                         search_for_multiple = True
14042
14043                 for x in lookup_owners:
14044                         if not search_for_multiple and os.path.isdir(x):
14045                                 search_for_multiple = True
14046                         relative_paths.append(x[len(root):])
14047
14048                 owners = set()
14049                 for pkg, relative_path in \
14050                         vardb._owners.iter_owners(relative_paths):
14051                         owners.add(pkg.mycpv)
14052                         if not search_for_multiple:
14053                                 break
14054
14055                 if owners:
14056                         for cpv in owners:
14057                                 slot = vardb.aux_get(cpv, ['SLOT'])[0]
14058                                 if not slot:
14059                                         # portage now masks packages with missing slot, but it's
14060                                         # possible that one was installed by an older version
14061                                         atom = portage.cpv_getkey(cpv)
14062                                 else:
14063                                         atom = '%s:%s' % (portage.cpv_getkey(cpv), slot)
14064                                 valid_atoms.append(portage.dep.Atom(atom))
14065                 else:
14066                         writemsg_level(("!!! '%s' is not claimed " + \
14067                                 "by any package.\n") % lookup_owners[0],
14068                                 level=logging.WARNING, noiselevel=-1)
14069
14070         if files and not valid_atoms:
14071                 return 1
14072
14073         if action in ('clean', 'unmerge') or \
14074                 (action == 'prune' and "--nodeps" in opts):
14075                 # When given a list of atoms, unmerge them in the order given.
14076                 ordered = action == 'unmerge'
14077                 unmerge(trees[settings["ROOT"]]['root_config'], opts, action,
14078                         valid_atoms, ldpath_mtimes, ordered=ordered)
14079                 rval = os.EX_OK
14080         elif action == 'deselect':
14081                 rval = action_deselect(settings, trees, opts, valid_atoms)
14082         else:
14083                 rval = action_depclean(settings, trees, ldpath_mtimes,
14084                         opts, action, valid_atoms, spinner)
14085
14086         return rval
14087
14088 def action_deselect(settings, trees, opts, atoms):
14089         root_config = trees[settings['ROOT']]['root_config']
14090         world_set = root_config.sets['world']
14091         if not hasattr(world_set, 'update'):
14092                 writemsg_level("World set does not appear to be mutable.\n",
14093                         level=logging.ERROR, noiselevel=-1)
14094                 return 1
14095
14096         vardb = root_config.trees['vartree'].dbapi
14097         expanded_atoms = set(atoms)
14098         from portage.dep import Atom
14099         for atom in atoms:
14100                 for cpv in vardb.match(atom):
14101                         slot, = vardb.aux_get(cpv, ['SLOT'])
14102                         if not slot:
14103                                 slot = '0'
14104                         expanded_atoms.add(Atom('%s:%s' % (portage.cpv_getkey(cpv), slot)))
14105
14106         pretend = '--pretend' in opts
14107         locked = False
14108         if not pretend and hasattr(world_set, 'lock'):
14109                 world_set.lock()
14110                 locked = True
14111         try:
14112                 discard_atoms = set()
14113                 world_set.load()
14114                 for atom in world_set:
14115                         if not isinstance(atom, Atom):
14116                                 # nested set
14117                                 continue
14118                         for arg_atom in expanded_atoms:
14119                                 if arg_atom.intersects(atom) and \
14120                                         not (arg_atom.slot and not atom.slot):
14121                                         discard_atoms.add(atom)
14122                                         break
14123                 if discard_atoms:
14124                         for atom in sorted(discard_atoms):
14125                                 print ">>> Removing %s from \"world\" favorites file..." % \
14126                                         colorize("INFORM", str(atom))
14127
14128                         if '--ask' in opts:
14129                                 prompt = "Would you like to remove these " + \
14130                                         "packages from your world favorites?"
14131                                 if userquery(prompt) == 'No':
14132                                         return os.EX_OK
14133
14134                         remaining = set(world_set)
14135                         remaining.difference_update(discard_atoms)
14136                         if not pretend:
14137                                 world_set.replace(remaining)
14138                 else:
14139                         print ">>> No matching atoms found in \"world\" favorites file..."
14140         finally:
14141                 if locked:
14142                         world_set.unlock()
14143         return os.EX_OK
14144
14145 def action_depclean(settings, trees, ldpath_mtimes,
14146         myopts, action, myfiles, spinner):
14147         # Kill packages that aren't explicitly merged or are required as a
14148         # dependency of another package. World file is explicit.
14149
14150         # Global depclean or prune operations are not very safe when there are
14151         # missing dependencies since it's unknown how badly incomplete
14152         # the dependency graph is, and we might accidentally remove packages
14153         # that should have been pulled into the graph. On the other hand, it's
14154         # relatively safe to ignore missing deps when only asked to remove
14155         # specific packages.
14156         allow_missing_deps = len(myfiles) > 0
14157
14158         msg = []
14159         msg.append("Always study the list of packages to be cleaned for any obvious\n")
14160         msg.append("mistakes. Packages that are part of the world set will always\n")
14161         msg.append("be kept.  They can be manually added to this set with\n")
14162         msg.append(good("`emerge --noreplace <atom>`") + ".  Packages that are listed in\n")
14163         msg.append("package.provided (see portage(5)) will be removed by\n")
14164         msg.append("depclean, even if they are part of the world set.\n")
14165         msg.append("\n")
14166         msg.append("As a safety measure, depclean will not remove any packages\n")
14167         msg.append("unless *all* required dependencies have been resolved.  As a\n")
14168         msg.append("consequence, it is often necessary to run %s\n" % \
14169                 good("`emerge --update"))
14170         msg.append(good("--newuse --deep @system @world`") + \
14171                 " prior to depclean.\n")
14172
14173         if action == "depclean" and "--quiet" not in myopts and not myfiles:
14174                 portage.writemsg_stdout("\n")
14175                 for x in msg:
14176                         portage.writemsg_stdout(colorize("WARN", " * ") + x)
14177
14178         xterm_titles = "notitles" not in settings.features
14179         myroot = settings["ROOT"]
14180         root_config = trees[myroot]["root_config"]
14181         getSetAtoms = root_config.setconfig.getSetAtoms
14182         vardb = trees[myroot]["vartree"].dbapi
14183         deselect = myopts.get('--deselect') != 'n'
14184
14185         required_set_names = ("system", "world")
14186         required_sets = {}
14187         set_args = []
14188
14189         for s in required_set_names:
14190                 required_sets[s] = InternalPackageSet(
14191                         initial_atoms=getSetAtoms(s))
14192
14193         
14194         # When removing packages, use a temporary version of world
14195         # which excludes packages that are intended to be eligible for
14196         # removal.
14197         world_temp_set = required_sets["world"]
14198         system_set = required_sets["system"]
14199
14200         if not system_set or not world_temp_set:
14201
14202                 if not system_set:
14203                         writemsg_level("!!! You have no system list.\n",
14204                                 level=logging.ERROR, noiselevel=-1)
14205
14206                 if not world_temp_set:
14207                         writemsg_level("!!! You have no world file.\n",
14208                                         level=logging.WARNING, noiselevel=-1)
14209
14210                 writemsg_level("!!! Proceeding is likely to " + \
14211                         "break your installation.\n",
14212                         level=logging.WARNING, noiselevel=-1)
14213                 if "--pretend" not in myopts:
14214                         countdown(int(settings["EMERGE_WARNING_DELAY"]), ">>> Depclean")
14215
14216         if action == "depclean":
14217                 emergelog(xterm_titles, " >>> depclean")
14218
14219         import textwrap
14220         args_set = InternalPackageSet()
14221         if myfiles:
14222                 args_set.update(myfiles)
14223                 matched_packages = False
14224                 for x in args_set:
14225                         if vardb.match(x):
14226                                 matched_packages = True
14227                                 break
14228                 if not matched_packages:
14229                         writemsg_level(">>> No packages selected for removal by %s\n" % \
14230                                 action)
14231                         return
14232
14233         writemsg_level("\nCalculating dependencies  ")
14234         resolver_params = create_depgraph_params(myopts, "remove")
14235         resolver = depgraph(settings, trees, myopts, resolver_params, spinner)
14236         vardb = resolver.trees[myroot]["vartree"].dbapi
14237
14238         if action == "depclean":
14239
14240                 if args_set:
14241
14242                         if deselect:
14243                                 world_temp_set.clear()
14244
14245                         # Pull in everything that's installed but not matched
14246                         # by an argument atom since we don't want to clean any
14247                         # package if something depends on it.
14248                         for pkg in vardb:
14249                                 spinner.update()
14250
14251                                 try:
14252                                         if args_set.findAtomForPackage(pkg) is None:
14253                                                 world_temp_set.add("=" + pkg.cpv)
14254                                                 continue
14255                                 except portage.exception.InvalidDependString, e:
14256                                         show_invalid_depstring_notice(pkg,
14257                                                 pkg.metadata["PROVIDE"], str(e))
14258                                         del e
14259                                         world_temp_set.add("=" + pkg.cpv)
14260                                         continue
14261
14262         elif action == "prune":
14263
14264                 if deselect:
14265                         world_temp_set.clear()
14266
14267                 # Pull in everything that's installed since we don't
14268                 # to prune a package if something depends on it.
14269                 world_temp_set.update(vardb.cp_all())
14270
14271                 if not args_set:
14272
14273                         # Try to prune everything that's slotted.
14274                         for cp in vardb.cp_all():
14275                                 if len(vardb.cp_list(cp)) > 1:
14276                                         args_set.add(cp)
14277
14278                 # Remove atoms from world that match installed packages
14279                 # that are also matched by argument atoms, but do not remove
14280                 # them if they match the highest installed version.
14281                 for pkg in vardb:
14282                         spinner.update()
14283                         pkgs_for_cp = vardb.match_pkgs(pkg.cp)
14284                         if not pkgs_for_cp or pkg not in pkgs_for_cp:
14285                                 raise AssertionError("package expected in matches: " + \
14286                                         "cp = %s, cpv = %s matches = %s" % \
14287                                         (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
14288
14289                         highest_version = pkgs_for_cp[-1]
14290                         if pkg == highest_version:
14291                                 # pkg is the highest version
14292                                 world_temp_set.add("=" + pkg.cpv)
14293                                 continue
14294
14295                         if len(pkgs_for_cp) <= 1:
14296                                 raise AssertionError("more packages expected: " + \
14297                                         "cp = %s, cpv = %s matches = %s" % \
14298                                         (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
14299
14300                         try:
14301                                 if args_set.findAtomForPackage(pkg) is None:
14302                                         world_temp_set.add("=" + pkg.cpv)
14303                                         continue
14304                         except portage.exception.InvalidDependString, e:
14305                                 show_invalid_depstring_notice(pkg,
14306                                         pkg.metadata["PROVIDE"], str(e))
14307                                 del e
14308                                 world_temp_set.add("=" + pkg.cpv)
14309                                 continue
14310
14311         set_args = {}
14312         for s, package_set in required_sets.iteritems():
14313                 set_atom = SETPREFIX + s
14314                 set_arg = SetArg(arg=set_atom, set=package_set,
14315                         root_config=resolver.roots[myroot])
14316                 set_args[s] = set_arg
14317                 for atom in set_arg.set:
14318                         resolver._dep_stack.append(
14319                                 Dependency(atom=atom, root=myroot, parent=set_arg))
14320                         resolver.digraph.add(set_arg, None)
14321
14322         success = resolver._complete_graph()
14323         writemsg_level("\b\b... done!\n")
14324
14325         resolver.display_problems()
14326
14327         if not success:
14328                 return 1
14329
14330         def unresolved_deps():
14331
14332                 unresolvable = set()
14333                 for dep in resolver._initially_unsatisfied_deps:
14334                         if isinstance(dep.parent, Package) and \
14335                                 (dep.priority > UnmergeDepPriority.SOFT):
14336                                 unresolvable.add((dep.atom, dep.parent.cpv))
14337
14338                 if not unresolvable:
14339                         return False
14340
14341                 if unresolvable and not allow_missing_deps:
14342                         prefix = bad(" * ")
14343                         msg = []
14344                         msg.append("Dependencies could not be completely resolved due to")
14345                         msg.append("the following required packages not being installed:")
14346                         msg.append("")
14347                         for atom, parent in unresolvable:
14348                                 msg.append("  %s pulled in by:" % (atom,))
14349                                 msg.append("    %s" % (parent,))
14350                                 msg.append("")
14351                         msg.append("Have you forgotten to run " + \
14352                                 good("`emerge --update --newuse --deep @system @world`") + " prior")
14353                         msg.append(("to %s? It may be necessary to manually " + \
14354                                 "uninstall packages that no longer") % action)
14355                         msg.append("exist in the portage tree since " + \
14356                                 "it may not be possible to satisfy their")
14357                         msg.append("dependencies.  Also, be aware of " + \
14358                                 "the --with-bdeps option that is documented")
14359                         msg.append("in " + good("`man emerge`") + ".")
14360                         if action == "prune":
14361                                 msg.append("")
14362                                 msg.append("If you would like to ignore " + \
14363                                         "dependencies then use %s." % good("--nodeps"))
14364                         writemsg_level("".join("%s%s\n" % (prefix, line) for line in msg),
14365                                 level=logging.ERROR, noiselevel=-1)
14366                         return True
14367                 return False
14368
14369         if unresolved_deps():
14370                 return 1
14371
14372         graph = resolver.digraph.copy()
14373         required_pkgs_total = 0
14374         for node in graph:
14375                 if isinstance(node, Package):
14376                         required_pkgs_total += 1
14377
14378         def show_parents(child_node):
14379                 parent_nodes = graph.parent_nodes(child_node)
14380                 if not parent_nodes:
14381                         # With --prune, the highest version can be pulled in without any
14382                         # real parent since all installed packages are pulled in.  In that
14383                         # case there's nothing to show here.
14384                         return
14385                 parent_strs = []
14386                 for node in parent_nodes:
14387                         parent_strs.append(str(getattr(node, "cpv", node)))
14388                 parent_strs.sort()
14389                 msg = []
14390                 msg.append("  %s pulled in by:\n" % (child_node.cpv,))
14391                 for parent_str in parent_strs:
14392                         msg.append("    %s\n" % (parent_str,))
14393                 msg.append("\n")
14394                 portage.writemsg_stdout("".join(msg), noiselevel=-1)
14395
14396         def cmp_pkg_cpv(pkg1, pkg2):
14397                 """Sort Package instances by cpv."""
14398                 if pkg1.cpv > pkg2.cpv:
14399                         return 1
14400                 elif pkg1.cpv == pkg2.cpv:
14401                         return 0
14402                 else:
14403                         return -1
14404
14405         def create_cleanlist():
14406                 pkgs_to_remove = []
14407
14408                 if action == "depclean":
14409                         if args_set:
14410
14411                                 for pkg in sorted(vardb, key=cmp_sort_key(cmp_pkg_cpv)):
14412                                         arg_atom = None
14413                                         try:
14414                                                 arg_atom = args_set.findAtomForPackage(pkg)
14415                                         except portage.exception.InvalidDependString:
14416                                                 # this error has already been displayed by now
14417                                                 continue
14418
14419                                         if arg_atom:
14420                                                 if pkg not in graph:
14421                                                         pkgs_to_remove.append(pkg)
14422                                                 elif "--verbose" in myopts:
14423                                                         show_parents(pkg)
14424
14425                         else:
14426                                 for pkg in sorted(vardb, key=cmp_sort_key(cmp_pkg_cpv)):
14427                                         if pkg not in graph:
14428                                                 pkgs_to_remove.append(pkg)
14429                                         elif "--verbose" in myopts:
14430                                                 show_parents(pkg)
14431
14432                 elif action == "prune":
14433                         # Prune really uses all installed instead of world. It's not
14434                         # a real reverse dependency so don't display it as such.
14435                         graph.remove(set_args["world"])
14436
14437                         for atom in args_set:
14438                                 for pkg in vardb.match_pkgs(atom):
14439                                         if pkg not in graph:
14440                                                 pkgs_to_remove.append(pkg)
14441                                         elif "--verbose" in myopts:
14442                                                 show_parents(pkg)
14443
14444                 if not pkgs_to_remove:
14445                         writemsg_level(
14446                                 ">>> No packages selected for removal by %s\n" % action)
14447                         if "--verbose" not in myopts:
14448                                 writemsg_level(
14449                                         ">>> To see reverse dependencies, use %s\n" % \
14450                                                 good("--verbose"))
14451                         if action == "prune":
14452                                 writemsg_level(
14453                                         ">>> To ignore dependencies, use %s\n" % \
14454                                                 good("--nodeps"))
14455
14456                 return pkgs_to_remove
14457
14458         cleanlist = create_cleanlist()
14459
14460         if len(cleanlist):
14461                 clean_set = set(cleanlist)
14462
14463                 # Check if any of these package are the sole providers of libraries
14464                 # with consumers that have not been selected for removal. If so, these
14465                 # packages and any dependencies need to be added to the graph.
14466                 real_vardb = trees[myroot]["vartree"].dbapi
14467                 linkmap = real_vardb.linkmap
14468                 liblist = linkmap.listLibraryObjects()
14469                 consumer_cache = {}
14470                 provider_cache = {}
14471                 soname_cache = {}
14472                 consumer_map = {}
14473
14474                 writemsg_level(">>> Checking for lib consumers...\n")
14475
14476                 for pkg in cleanlist:
14477                         pkg_dblink = real_vardb._dblink(pkg.cpv)
14478                         provided_libs = set()
14479
14480                         for lib in liblist:
14481                                 if pkg_dblink.isowner(lib, myroot):
14482                                         provided_libs.add(lib)
14483
14484                         if not provided_libs:
14485                                 continue
14486
14487                         consumers = {}
14488                         for lib in provided_libs:
14489                                 lib_consumers = consumer_cache.get(lib)
14490                                 if lib_consumers is None:
14491                                         lib_consumers = linkmap.findConsumers(lib)
14492                                         consumer_cache[lib] = lib_consumers
14493                                 if lib_consumers:
14494                                         consumers[lib] = lib_consumers
14495
14496                         if not consumers:
14497                                 continue
14498
14499                         for lib, lib_consumers in consumers.items():
14500                                 for consumer_file in list(lib_consumers):
14501                                         if pkg_dblink.isowner(consumer_file, myroot):
14502                                                 lib_consumers.remove(consumer_file)
14503                                 if not lib_consumers:
14504                                         del consumers[lib]
14505
14506                         if not consumers:
14507                                 continue
14508
14509                         for lib, lib_consumers in consumers.iteritems():
14510
14511                                 soname = soname_cache.get(lib)
14512                                 if soname is None:
14513                                         soname = linkmap.getSoname(lib)
14514                                         soname_cache[lib] = soname
14515
14516                                 consumer_providers = []
14517                                 for lib_consumer in lib_consumers:
14518                                         providers = provider_cache.get(lib)
14519                                         if providers is None:
14520                                                 providers = linkmap.findProviders(lib_consumer)
14521                                                 provider_cache[lib_consumer] = providers
14522                                         if soname not in providers:
14523                                                 # Why does this happen?
14524                                                 continue
14525                                         consumer_providers.append(
14526                                                 (lib_consumer, providers[soname]))
14527
14528                                 consumers[lib] = consumer_providers
14529
14530                         consumer_map[pkg] = consumers
14531
14532                 if consumer_map:
14533
14534                         search_files = set()
14535                         for consumers in consumer_map.itervalues():
14536                                 for lib, consumer_providers in consumers.iteritems():
14537                                         for lib_consumer, providers in consumer_providers:
14538                                                 search_files.add(lib_consumer)
14539                                                 search_files.update(providers)
14540
14541                         writemsg_level(">>> Assigning files to packages...\n")
14542                         file_owners = real_vardb._owners.getFileOwnerMap(search_files)
14543
14544                         for pkg, consumers in consumer_map.items():
14545                                 for lib, consumer_providers in consumers.items():
14546                                         lib_consumers = set()
14547
14548                                         for lib_consumer, providers in consumer_providers:
14549                                                 owner_set = file_owners.get(lib_consumer)
14550                                                 provider_dblinks = set()
14551                                                 provider_pkgs = set()
14552
14553                                                 if len(providers) > 1:
14554                                                         for provider in providers:
14555                                                                 provider_set = file_owners.get(provider)
14556                                                                 if provider_set is not None:
14557                                                                         provider_dblinks.update(provider_set)
14558
14559                                                 if len(provider_dblinks) > 1:
14560                                                         for provider_dblink in provider_dblinks:
14561                                                                 pkg_key = ("installed", myroot,
14562                                                                         provider_dblink.mycpv, "nomerge")
14563                                                                 if pkg_key not in clean_set:
14564                                                                         provider_pkgs.add(vardb.get(pkg_key))
14565
14566                                                 if provider_pkgs:
14567                                                         continue
14568
14569                                                 if owner_set is not None:
14570                                                         lib_consumers.update(owner_set)
14571
14572                                         for consumer_dblink in list(lib_consumers):
14573                                                 if ("installed", myroot, consumer_dblink.mycpv,
14574                                                         "nomerge") in clean_set:
14575                                                         lib_consumers.remove(consumer_dblink)
14576                                                         continue
14577
14578                                         if lib_consumers:
14579                                                 consumers[lib] = lib_consumers
14580                                         else:
14581                                                 del consumers[lib]
14582                                 if not consumers:
14583                                         del consumer_map[pkg]
14584
14585                 if consumer_map:
14586                         # TODO: Implement a package set for rebuilding consumer packages.
14587
14588                         msg = "In order to avoid breakage of link level " + \
14589                                 "dependencies, one or more packages will not be removed. " + \
14590                                 "This can be solved by rebuilding " + \
14591                                 "the packages that pulled them in."
14592
14593                         prefix = bad(" * ")
14594                         from textwrap import wrap
14595                         writemsg_level("".join(prefix + "%s\n" % line for \
14596                                 line in wrap(msg, 70)), level=logging.WARNING, noiselevel=-1)
14597
14598                         msg = []
14599                         for pkg, consumers in consumer_map.iteritems():
14600                                 unique_consumers = set(chain(*consumers.values()))
14601                                 unique_consumers = sorted(consumer.mycpv \
14602                                         for consumer in unique_consumers)
14603                                 msg.append("")
14604                                 msg.append("  %s pulled in by:" % (pkg.cpv,))
14605                                 for consumer in unique_consumers:
14606                                         msg.append("    %s" % (consumer,))
14607                         msg.append("")
14608                         writemsg_level("".join(prefix + "%s\n" % line for line in msg),
14609                                 level=logging.WARNING, noiselevel=-1)
14610
14611                         # Add lib providers to the graph as children of lib consumers,
14612                         # and also add any dependencies pulled in by the provider.
14613                         writemsg_level(">>> Adding lib providers to graph...\n")
14614
14615                         for pkg, consumers in consumer_map.iteritems():
14616                                 for consumer_dblink in set(chain(*consumers.values())):
14617                                         consumer_pkg = vardb.get(("installed", myroot,
14618                                                 consumer_dblink.mycpv, "nomerge"))
14619                                         if not resolver._add_pkg(pkg,
14620                                                 Dependency(parent=consumer_pkg,
14621                                                 priority=UnmergeDepPriority(runtime=True),
14622                                                 root=pkg.root)):
14623                                                 resolver.display_problems()
14624                                                 return 1
14625
14626                         writemsg_level("\nCalculating dependencies  ")
14627                         success = resolver._complete_graph()
14628                         writemsg_level("\b\b... done!\n")
14629                         resolver.display_problems()
14630                         if not success:
14631                                 return 1
14632                         if unresolved_deps():
14633                                 return 1
14634
14635                         graph = resolver.digraph.copy()
14636                         required_pkgs_total = 0
14637                         for node in graph:
14638                                 if isinstance(node, Package):
14639                                         required_pkgs_total += 1
14640                         cleanlist = create_cleanlist()
14641                         if not cleanlist:
14642                                 return 0
14643                         clean_set = set(cleanlist)
14644
14645                 # Use a topological sort to create an unmerge order such that
14646                 # each package is unmerged before it's dependencies. This is
14647                 # necessary to avoid breaking things that may need to run
14648                 # during pkg_prerm or pkg_postrm phases.
14649
14650                 # Create a new graph to account for dependencies between the
14651                 # packages being unmerged.
14652                 graph = digraph()
14653                 del cleanlist[:]
14654
14655                 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
14656                 runtime = UnmergeDepPriority(runtime=True)
14657                 runtime_post = UnmergeDepPriority(runtime_post=True)
14658                 buildtime = UnmergeDepPriority(buildtime=True)
14659                 priority_map = {
14660                         "RDEPEND": runtime,
14661                         "PDEPEND": runtime_post,
14662                         "DEPEND": buildtime,
14663                 }
14664
14665                 for node in clean_set:
14666                         graph.add(node, None)
14667                         mydeps = []
14668                         node_use = node.metadata["USE"].split()
14669                         for dep_type in dep_keys:
14670                                 depstr = node.metadata[dep_type]
14671                                 if not depstr:
14672                                         continue
14673                                 try:
14674                                         portage.dep._dep_check_strict = False
14675                                         success, atoms = portage.dep_check(depstr, None, settings,
14676                                                 myuse=node_use, trees=resolver._graph_trees,
14677                                                 myroot=myroot)
14678                                 finally:
14679                                         portage.dep._dep_check_strict = True
14680                                 if not success:
14681                                         # Ignore invalid deps of packages that will
14682                                         # be uninstalled anyway.
14683                                         continue
14684
14685                                 priority = priority_map[dep_type]
14686                                 for atom in atoms:
14687                                         if not isinstance(atom, portage.dep.Atom):
14688                                                 # Ignore invalid atoms returned from dep_check().
14689                                                 continue
14690                                         if atom.blocker:
14691                                                 continue
14692                                         matches = vardb.match_pkgs(atom)
14693                                         if not matches:
14694                                                 continue
14695                                         for child_node in matches:
14696                                                 if child_node in clean_set:
14697                                                         graph.add(child_node, node, priority=priority)
14698
14699                 ordered = True
14700                 if len(graph.order) == len(graph.root_nodes()):
14701                         # If there are no dependencies between packages
14702                         # let unmerge() group them by cat/pn.
14703                         ordered = False
14704                         cleanlist = [pkg.cpv for pkg in graph.order]
14705                 else:
14706                         # Order nodes from lowest to highest overall reference count for
14707                         # optimal root node selection.
14708                         node_refcounts = {}
14709                         for node in graph.order:
14710                                 node_refcounts[node] = len(graph.parent_nodes(node))
14711                         def cmp_reference_count(node1, node2):
14712                                 return node_refcounts[node1] - node_refcounts[node2]
14713                         graph.order.sort(key=cmp_sort_key(cmp_reference_count))
14714         
14715                         ignore_priority_range = [None]
14716                         ignore_priority_range.extend(
14717                                 xrange(UnmergeDepPriority.MIN, UnmergeDepPriority.MAX + 1))
14718                         while not graph.empty():
14719                                 for ignore_priority in ignore_priority_range:
14720                                         nodes = graph.root_nodes(ignore_priority=ignore_priority)
14721                                         if nodes:
14722                                                 break
14723                                 if not nodes:
14724                                         raise AssertionError("no root nodes")
14725                                 if ignore_priority is not None:
14726                                         # Some deps have been dropped due to circular dependencies,
14727                                         # so only pop one node in order do minimize the number that
14728                                         # are dropped.
14729                                         del nodes[1:]
14730                                 for node in nodes:
14731                                         graph.remove(node)
14732                                         cleanlist.append(node.cpv)
14733
14734                 unmerge(root_config, myopts, "unmerge", cleanlist,
14735                         ldpath_mtimes, ordered=ordered)
14736
14737         if action == "prune":
14738                 return
14739
14740         if not cleanlist and "--quiet" in myopts:
14741                 return
14742
14743         print "Packages installed:   "+str(len(vardb.cpv_all()))
14744         print "Packages in world:    " + \
14745                 str(len(root_config.sets["world"].getAtoms()))
14746         print "Packages in system:   " + \
14747                 str(len(root_config.sets["system"].getAtoms()))
14748         print "Required packages:    "+str(required_pkgs_total)
14749         if "--pretend" in myopts:
14750                 print "Number to remove:     "+str(len(cleanlist))
14751         else:
14752                 print "Number removed:       "+str(len(cleanlist))
14753
14754 def resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
14755         """
14756         Construct a depgraph for the given resume list. This will raise
14757         PackageNotFound or depgraph.UnsatisfiedResumeDep when necessary.
14758         @rtype: tuple
14759         @returns: (success, depgraph, dropped_tasks)
14760         """
14761         skip_masked = True
14762         skip_unsatisfied = True
14763         mergelist = mtimedb["resume"]["mergelist"]
14764         dropped_tasks = set()
14765         while True:
14766                 mydepgraph = depgraph(settings, trees,
14767                         myopts, myparams, spinner)
14768                 try:
14769                         success = mydepgraph.loadResumeCommand(mtimedb["resume"],
14770                                 skip_masked=skip_masked)
14771                 except depgraph.UnsatisfiedResumeDep, e:
14772                         if not skip_unsatisfied:
14773                                 raise
14774
14775                         graph = mydepgraph.digraph
14776                         unsatisfied_parents = dict((dep.parent, dep.parent) \
14777                                 for dep in e.value)
14778                         traversed_nodes = set()
14779                         unsatisfied_stack = list(unsatisfied_parents)
14780                         while unsatisfied_stack:
14781                                 pkg = unsatisfied_stack.pop()
14782                                 if pkg in traversed_nodes:
14783                                         continue
14784                                 traversed_nodes.add(pkg)
14785
14786                                 # If this package was pulled in by a parent
14787                                 # package scheduled for merge, removing this
14788                                 # package may cause the the parent package's
14789                                 # dependency to become unsatisfied.
14790                                 for parent_node in graph.parent_nodes(pkg):
14791                                         if not isinstance(parent_node, Package) \
14792                                                 or parent_node.operation not in ("merge", "nomerge"):
14793                                                 continue
14794                                         unsatisfied = \
14795                                                 graph.child_nodes(parent_node,
14796                                                 ignore_priority=DepPrioritySatisfiedRange.ignore_soft)
14797                                         if pkg in unsatisfied:
14798                                                 unsatisfied_parents[parent_node] = parent_node
14799                                                 unsatisfied_stack.append(parent_node)
14800
14801                         pruned_mergelist = []
14802                         for x in mergelist:
14803                                 if isinstance(x, list) and \
14804                                         tuple(x) not in unsatisfied_parents:
14805                                         pruned_mergelist.append(x)
14806
14807                         # If the mergelist doesn't shrink then this loop is infinite.
14808                         if len(pruned_mergelist) == len(mergelist):
14809                                 # This happens if a package can't be dropped because
14810                                 # it's already installed, but it has unsatisfied PDEPEND.
14811                                 raise
14812                         mergelist[:] = pruned_mergelist
14813
14814                         # Exclude installed packages that have been removed from the graph due
14815                         # to failure to build/install runtime dependencies after the dependent
14816                         # package has already been installed.
14817                         dropped_tasks.update(pkg for pkg in \
14818                                 unsatisfied_parents if pkg.operation != "nomerge")
14819                         mydepgraph.break_refs(unsatisfied_parents)
14820
14821                         del e, graph, traversed_nodes, \
14822                                 unsatisfied_parents, unsatisfied_stack
14823                         continue
14824                 else:
14825                         break
14826         return (success, mydepgraph, dropped_tasks)
14827
14828 def action_build(settings, trees, mtimedb,
14829         myopts, myaction, myfiles, spinner):
14830
14831         # validate the state of the resume data
14832         # so that we can make assumptions later.
14833         for k in ("resume", "resume_backup"):
14834                 if k not in mtimedb:
14835                         continue
14836                 resume_data = mtimedb[k]
14837                 if not isinstance(resume_data, dict):
14838                         del mtimedb[k]
14839                         continue
14840                 mergelist = resume_data.get("mergelist")
14841                 if not isinstance(mergelist, list):
14842                         del mtimedb[k]
14843                         continue
14844                 for x in mergelist:
14845                         if not (isinstance(x, list) and len(x) == 4):
14846                                 continue
14847                         pkg_type, pkg_root, pkg_key, pkg_action = x
14848                         if pkg_root not in trees:
14849                                 # Current $ROOT setting differs,
14850                                 # so the list must be stale.
14851                                 mergelist = None
14852                                 break
14853                 if not mergelist:
14854                         del mtimedb[k]
14855                         continue
14856                 resume_opts = resume_data.get("myopts")
14857                 if not isinstance(resume_opts, (dict, list)):
14858                         del mtimedb[k]
14859                         continue
14860                 favorites = resume_data.get("favorites")
14861                 if not isinstance(favorites, list):
14862                         del mtimedb[k]
14863                         continue
14864
14865         resume = False
14866         if "--resume" in myopts and \
14867                 ("resume" in mtimedb or
14868                 "resume_backup" in mtimedb):
14869                 resume = True
14870                 if "resume" not in mtimedb:
14871                         mtimedb["resume"] = mtimedb["resume_backup"]
14872                         del mtimedb["resume_backup"]
14873                         mtimedb.commit()
14874                 # "myopts" is a list for backward compatibility.
14875                 resume_opts = mtimedb["resume"].get("myopts", [])
14876                 if isinstance(resume_opts, list):
14877                         resume_opts = dict((k,True) for k in resume_opts)
14878                 for opt in ("--ask", "--color", "--skipfirst", "--tree"):
14879                         resume_opts.pop(opt, None)
14880
14881                 # Current options always override resume_opts.
14882                 resume_opts.update(myopts)
14883                 myopts.clear()
14884                 myopts.update(resume_opts)
14885
14886                 if "--debug" in myopts:
14887                         writemsg_level("myopts %s\n" % (myopts,))
14888
14889                 # Adjust config according to options of the command being resumed.
14890                 for myroot in trees:
14891                         mysettings =  trees[myroot]["vartree"].settings
14892                         mysettings.unlock()
14893                         adjust_config(myopts, mysettings)
14894                         mysettings.lock()
14895                         del myroot, mysettings
14896
14897         ldpath_mtimes = mtimedb["ldpath"]
14898         favorites=[]
14899         merge_count = 0
14900         buildpkgonly = "--buildpkgonly" in myopts
14901         pretend = "--pretend" in myopts
14902         fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
14903         ask = "--ask" in myopts
14904         nodeps = "--nodeps" in myopts
14905         oneshot = "--oneshot" in myopts or "--onlydeps" in myopts
14906         tree = "--tree" in myopts
14907         if nodeps and tree:
14908                 tree = False
14909                 del myopts["--tree"]
14910                 portage.writemsg(colorize("WARN", " * ") + \
14911                         "--tree is broken with --nodeps. Disabling...\n")
14912         debug = "--debug" in myopts
14913         verbose = "--verbose" in myopts
14914         quiet = "--quiet" in myopts
14915         if pretend or fetchonly:
14916                 # make the mtimedb readonly
14917                 mtimedb.filename = None
14918         if '--digest' in myopts or 'digest' in settings.features:
14919                 if '--digest' in myopts:
14920                         msg = "The --digest option"
14921                 else:
14922                         msg = "The FEATURES=digest setting"
14923
14924                 msg += " can prevent corruption from being" + \
14925                         " noticed. The `repoman manifest` command is the preferred" + \
14926                         " way to generate manifests and it is capable of doing an" + \
14927                         " entire repository or category at once."
14928                 prefix = bad(" * ")
14929                 writemsg(prefix + "\n")
14930                 from textwrap import wrap
14931                 for line in wrap(msg, 72):
14932                         writemsg("%s%s\n" % (prefix, line))
14933                 writemsg(prefix + "\n")
14934
14935         if "--quiet" not in myopts and \
14936                 ("--pretend" in myopts or "--ask" in myopts or \
14937                 "--tree" in myopts or "--verbose" in myopts):
14938                 action = ""
14939                 if "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
14940                         action = "fetched"
14941                 elif "--buildpkgonly" in myopts:
14942                         action = "built"
14943                 else:
14944                         action = "merged"
14945                 if "--tree" in myopts and action != "fetched": # Tree doesn't work with fetching
14946                         print
14947                         print darkgreen("These are the packages that would be %s, in reverse order:") % action
14948                         print
14949                 else:
14950                         print
14951                         print darkgreen("These are the packages that would be %s, in order:") % action
14952                         print
14953
14954         show_spinner = "--quiet" not in myopts and "--nodeps" not in myopts
14955         if not show_spinner:
14956                 spinner.update = spinner.update_quiet
14957
14958         if resume:
14959                 favorites = mtimedb["resume"].get("favorites")
14960                 if not isinstance(favorites, list):
14961                         favorites = []
14962
14963                 if show_spinner:
14964                         print "Calculating dependencies  ",
14965                 myparams = create_depgraph_params(myopts, myaction)
14966
14967                 resume_data = mtimedb["resume"]
14968                 mergelist = resume_data["mergelist"]
14969                 if mergelist and "--skipfirst" in myopts:
14970                         for i, task in enumerate(mergelist):
14971                                 if isinstance(task, list) and \
14972                                         task and task[-1] == "merge":
14973                                         del mergelist[i]
14974                                         break
14975
14976                 success = False
14977                 mydepgraph = None
14978                 try:
14979                         success, mydepgraph, dropped_tasks = resume_depgraph(
14980                                 settings, trees, mtimedb, myopts, myparams, spinner)
14981                 except (portage.exception.PackageNotFound,
14982                         depgraph.UnsatisfiedResumeDep), e:
14983                         if isinstance(e, depgraph.UnsatisfiedResumeDep):
14984                                 mydepgraph = e.depgraph
14985                         if show_spinner:
14986                                 print
14987                         from textwrap import wrap
14988                         from portage.output import EOutput
14989                         out = EOutput()
14990
14991                         resume_data = mtimedb["resume"]
14992                         mergelist = resume_data.get("mergelist")
14993                         if not isinstance(mergelist, list):
14994                                 mergelist = []
14995                         if mergelist and debug or (verbose and not quiet):
14996                                 out.eerror("Invalid resume list:")
14997                                 out.eerror("")
14998                                 indent = "  "
14999                                 for task in mergelist:
15000                                         if isinstance(task, list):
15001                                                 out.eerror(indent + str(tuple(task)))
15002                                 out.eerror("")
15003
15004                         if isinstance(e, depgraph.UnsatisfiedResumeDep):
15005                                 out.eerror("One or more packages are either masked or " + \
15006                                         "have missing dependencies:")
15007                                 out.eerror("")
15008                                 indent = "  "
15009                                 for dep in e.value:
15010                                         if dep.atom is None:
15011                                                 out.eerror(indent + "Masked package:")
15012                                                 out.eerror(2 * indent + str(dep.parent))
15013                                                 out.eerror("")
15014                                         else:
15015                                                 out.eerror(indent + str(dep.atom) + " pulled in by:")
15016                                                 out.eerror(2 * indent + str(dep.parent))
15017                                                 out.eerror("")
15018                                 msg = "The resume list contains packages " + \
15019                                         "that are either masked or have " + \
15020                                         "unsatisfied dependencies. " + \
15021                                         "Please restart/continue " + \
15022                                         "the operation manually, or use --skipfirst " + \
15023                                         "to skip the first package in the list and " + \
15024                                         "any other packages that may be " + \
15025                                         "masked or have missing dependencies."
15026                                 for line in wrap(msg, 72):
15027                                         out.eerror(line)
15028                         elif isinstance(e, portage.exception.PackageNotFound):
15029                                 out.eerror("An expected package is " + \
15030                                         "not available: %s" % str(e))
15031                                 out.eerror("")
15032                                 msg = "The resume list contains one or more " + \
15033                                         "packages that are no longer " + \
15034                                         "available. Please restart/continue " + \
15035                                         "the operation manually."
15036                                 for line in wrap(msg, 72):
15037                                         out.eerror(line)
15038                 else:
15039                         if show_spinner:
15040                                 print "\b\b... done!"
15041
15042                 if success:
15043                         if dropped_tasks:
15044                                 portage.writemsg("!!! One or more packages have been " + \
15045                                         "dropped due to\n" + \
15046                                         "!!! masking or unsatisfied dependencies:\n\n",
15047                                         noiselevel=-1)
15048                                 for task in dropped_tasks:
15049                                         portage.writemsg("  " + str(task) + "\n", noiselevel=-1)
15050                                 portage.writemsg("\n", noiselevel=-1)
15051                         del dropped_tasks
15052                 else:
15053                         if mydepgraph is not None:
15054                                 mydepgraph.display_problems()
15055                         if not (ask or pretend):
15056                                 # delete the current list and also the backup
15057                                 # since it's probably stale too.
15058                                 for k in ("resume", "resume_backup"):
15059                                         mtimedb.pop(k, None)
15060                                 mtimedb.commit()
15061
15062                         return 1
15063         else:
15064                 if ("--resume" in myopts):
15065                         print darkgreen("emerge: It seems we have nothing to resume...")
15066                         return os.EX_OK
15067
15068                 myparams = create_depgraph_params(myopts, myaction)
15069                 if "--quiet" not in myopts and "--nodeps" not in myopts:
15070                         print "Calculating dependencies  ",
15071                         sys.stdout.flush()
15072                 mydepgraph = depgraph(settings, trees, myopts, myparams, spinner)
15073                 try:
15074                         retval, favorites = mydepgraph.select_files(myfiles)
15075                 except portage.exception.PackageNotFound, e:
15076                         portage.writemsg("\n!!! %s\n" % str(e), noiselevel=-1)
15077                         return 1
15078                 except portage.exception.PackageSetNotFound, e:
15079                         root_config = trees[settings["ROOT"]]["root_config"]
15080                         display_missing_pkg_set(root_config, e.value)
15081                         return 1
15082                 if show_spinner:
15083                         print "\b\b... done!"
15084                 if not retval:
15085                         mydepgraph.display_problems()
15086                         return 1
15087
15088         if "--pretend" not in myopts and \
15089                 ("--ask" in myopts or "--tree" in myopts or \
15090                 "--verbose" in myopts) and \
15091                 not ("--quiet" in myopts and "--ask" not in myopts):
15092                 if "--resume" in myopts:
15093                         mymergelist = mydepgraph.altlist()
15094                         if len(mymergelist) == 0:
15095                                 print colorize("INFORM", "emerge: It seems we have nothing to resume...")
15096                                 return os.EX_OK
15097                         favorites = mtimedb["resume"]["favorites"]
15098                         retval = mydepgraph.display(
15099                                 mydepgraph.altlist(reversed=tree),
15100                                 favorites=favorites)
15101                         mydepgraph.display_problems()
15102                         if retval != os.EX_OK:
15103                                 return retval
15104                         prompt="Would you like to resume merging these packages?"
15105                 else:
15106                         retval = mydepgraph.display(
15107                                 mydepgraph.altlist(reversed=("--tree" in myopts)),
15108                                 favorites=favorites)
15109                         mydepgraph.display_problems()
15110                         if retval != os.EX_OK:
15111                                 return retval
15112                         mergecount=0
15113                         for x in mydepgraph.altlist():
15114                                 if isinstance(x, Package) and x.operation == "merge":
15115                                         mergecount += 1
15116
15117                         if mergecount==0:
15118                                 sets = trees[settings["ROOT"]]["root_config"].sets
15119                                 world_candidates = None
15120                                 if "--noreplace" in myopts and \
15121                                         not oneshot and favorites:
15122                                         # Sets that are not world candidates are filtered
15123                                         # out here since the favorites list needs to be
15124                                         # complete for depgraph.loadResumeCommand() to
15125                                         # operate correctly.
15126                                         world_candidates = [x for x in favorites \
15127                                                 if not (x.startswith(SETPREFIX) and \
15128                                                 not sets[x[1:]].world_candidate)]
15129                                 if "--noreplace" in myopts and \
15130                                         not oneshot and world_candidates:
15131                                         print
15132                                         for x in world_candidates:
15133                                                 print " %s %s" % (good("*"), x)
15134                                         prompt="Would you like to add these packages to your world favorites?"
15135                                 elif settings["AUTOCLEAN"] and "yes"==settings["AUTOCLEAN"]:
15136                                         prompt="Nothing to merge; would you like to auto-clean packages?"
15137                                 else:
15138                                         print
15139                                         print "Nothing to merge; quitting."
15140                                         print
15141                                         return os.EX_OK
15142                         elif "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
15143                                 prompt="Would you like to fetch the source files for these packages?"
15144                         else:
15145                                 prompt="Would you like to merge these packages?"
15146                 print
15147                 if "--ask" in myopts and userquery(prompt) == "No":
15148                         print
15149                         print "Quitting."
15150                         print
15151                         return os.EX_OK
15152                 # Don't ask again (e.g. when auto-cleaning packages after merge)
15153                 myopts.pop("--ask", None)
15154
15155         if ("--pretend" in myopts) and not ("--fetchonly" in myopts or "--fetch-all-uri" in myopts):
15156                 if ("--resume" in myopts):
15157                         mymergelist = mydepgraph.altlist()
15158                         if len(mymergelist) == 0:
15159                                 print colorize("INFORM", "emerge: It seems we have nothing to resume...")
15160                                 return os.EX_OK
15161                         favorites = mtimedb["resume"]["favorites"]
15162                         retval = mydepgraph.display(
15163                                 mydepgraph.altlist(reversed=tree),
15164                                 favorites=favorites)
15165                         mydepgraph.display_problems()
15166                         if retval != os.EX_OK:
15167                                 return retval
15168                 else:
15169                         retval = mydepgraph.display(
15170                                 mydepgraph.altlist(reversed=("--tree" in myopts)),
15171                                 favorites=favorites)
15172                         mydepgraph.display_problems()
15173                         if retval != os.EX_OK:
15174                                 return retval
15175                         if "--buildpkgonly" in myopts:
15176                                 graph_copy = mydepgraph.digraph.clone()
15177                                 removed_nodes = set()
15178                                 for node in graph_copy:
15179                                         if not isinstance(node, Package) or \
15180                                                 node.operation == "nomerge":
15181                                                 removed_nodes.add(node)
15182                                 graph_copy.difference_update(removed_nodes)
15183                                 if not graph_copy.hasallzeros(ignore_priority = \
15184                                         DepPrioritySatisfiedRange.ignore_medium):
15185                                         print "\n!!! --buildpkgonly requires all dependencies to be merged."
15186                                         print "!!! You have to merge the dependencies before you can build this package.\n"
15187                                         return 1
15188         else:
15189                 if "--buildpkgonly" in myopts:
15190                         graph_copy = mydepgraph.digraph.clone()
15191                         removed_nodes = set()
15192                         for node in graph_copy:
15193                                 if not isinstance(node, Package) or \
15194                                         node.operation == "nomerge":
15195                                         removed_nodes.add(node)
15196                         graph_copy.difference_update(removed_nodes)
15197                         if not graph_copy.hasallzeros(ignore_priority = \
15198                                 DepPrioritySatisfiedRange.ignore_medium):
15199                                 print "\n!!! --buildpkgonly requires all dependencies to be merged."
15200                                 print "!!! Cannot merge requested packages. Merge deps and try again.\n"
15201                                 return 1
15202
15203                 if ("--resume" in myopts):
15204                         favorites=mtimedb["resume"]["favorites"]
15205                         mymergelist = mydepgraph.altlist()
15206                         mydepgraph.break_refs(mymergelist)
15207                         mergetask = Scheduler(settings, trees, mtimedb, myopts,
15208                                 spinner, mymergelist, favorites, mydepgraph.schedulerGraph())
15209                         del mydepgraph, mymergelist
15210                         clear_caches(trees)
15211
15212                         retval = mergetask.merge()
15213                         merge_count = mergetask.curval
15214                 else:
15215                         if "resume" in mtimedb and \
15216                         "mergelist" in mtimedb["resume"] and \
15217                         len(mtimedb["resume"]["mergelist"]) > 1:
15218                                 mtimedb["resume_backup"] = mtimedb["resume"]
15219                                 del mtimedb["resume"]
15220                                 mtimedb.commit()
15221                         mtimedb["resume"]={}
15222                         # Stored as a dict starting with portage-2.1.6_rc1, and supported
15223                         # by >=portage-2.1.3_rc8. Versions <portage-2.1.3_rc8 only support
15224                         # a list type for options.
15225                         mtimedb["resume"]["myopts"] = myopts.copy()
15226
15227                         # Convert Atom instances to plain str.
15228                         mtimedb["resume"]["favorites"] = [str(x) for x in favorites]
15229
15230                         pkglist = mydepgraph.altlist()
15231                         mydepgraph.saveNomergeFavorites()
15232                         mydepgraph.break_refs(pkglist)
15233                         mergetask = Scheduler(settings, trees, mtimedb, myopts,
15234                                 spinner, pkglist, favorites, mydepgraph.schedulerGraph())
15235                         del mydepgraph, pkglist
15236                         clear_caches(trees)
15237
15238                         retval = mergetask.merge()
15239                         merge_count = mergetask.curval
15240
15241                 if retval == os.EX_OK and not (buildpkgonly or fetchonly or pretend):
15242                         if "yes" == settings.get("AUTOCLEAN"):
15243                                 portage.writemsg_stdout(">>> Auto-cleaning packages...\n")
15244                                 unmerge(trees[settings["ROOT"]]["root_config"],
15245                                         myopts, "clean", [],
15246                                         ldpath_mtimes, autoclean=1)
15247                         else:
15248                                 portage.writemsg_stdout(colorize("WARN", "WARNING:")
15249                                         + " AUTOCLEAN is disabled.  This can cause serious"
15250                                         + " problems due to overlapping packages.\n")
15251                         trees[settings["ROOT"]]["vartree"].dbapi.plib_registry.pruneNonExisting()
15252
15253                 return retval
15254
15255 def multiple_actions(action1, action2):
15256         sys.stderr.write("\n!!! Multiple actions requested... Please choose one only.\n")
15257         sys.stderr.write("!!! '%s' or '%s'\n\n" % (action1, action2))
15258         sys.exit(1)
15259
15260 def insert_optional_args(args):
15261         """
15262         Parse optional arguments and insert a value if one has
15263         not been provided. This is done before feeding the args
15264         to the optparse parser since that parser does not support
15265         this feature natively.
15266         """
15267
15268         new_args = []
15269         jobs_opts = ("-j", "--jobs")
15270         default_arg_opts = {
15271                 '--deselect'   : ('n',),
15272                 '--root-deps'  : ('rdeps',),
15273         }
15274         arg_stack = args[:]
15275         arg_stack.reverse()
15276         while arg_stack:
15277                 arg = arg_stack.pop()
15278
15279                 default_arg_choices = default_arg_opts.get(arg)
15280                 if default_arg_choices is not None:
15281                         new_args.append(arg)
15282                         if arg_stack and arg_stack[-1] in default_arg_choices:
15283                                 new_args.append(arg_stack.pop())
15284                         else:
15285                                 # insert default argument
15286                                 new_args.append('True')
15287                         continue
15288
15289                 short_job_opt = bool("j" in arg and arg[:1] == "-" and arg[:2] != "--")
15290                 if not (short_job_opt or arg in jobs_opts):
15291                         new_args.append(arg)
15292                         continue
15293
15294                 # Insert an empty placeholder in order to
15295                 # satisfy the requirements of optparse.
15296
15297                 new_args.append("--jobs")
15298                 job_count = None
15299                 saved_opts = None
15300                 if short_job_opt and len(arg) > 2:
15301                         if arg[:2] == "-j":
15302                                 try:
15303                                         job_count = int(arg[2:])
15304                                 except ValueError:
15305                                         saved_opts = arg[2:]
15306                         else:
15307                                 job_count = "True"
15308                                 saved_opts = arg[1:].replace("j", "")
15309
15310                 if job_count is None and arg_stack:
15311                         try:
15312                                 job_count = int(arg_stack[-1])
15313                         except ValueError:
15314                                 pass
15315                         else:
15316                                 # Discard the job count from the stack
15317                                 # since we're consuming it here.
15318                                 arg_stack.pop()
15319
15320                 if job_count is None:
15321                         # unlimited number of jobs
15322                         new_args.append("True")
15323                 else:
15324                         new_args.append(str(job_count))
15325
15326                 if saved_opts is not None:
15327                         new_args.append("-" + saved_opts)
15328
15329         return new_args
15330
15331 def parse_opts(tmpcmdline, silent=False):
15332         myaction=None
15333         myopts = {}
15334         myfiles=[]
15335
15336         global actions, options, shortmapping
15337
15338         longopt_aliases = {"--cols":"--columns", "--skip-first":"--skipfirst"}
15339         argument_options = {
15340                 "--config-root": {
15341                         "help":"specify the location for portage configuration files",
15342                         "action":"store"
15343                 },
15344                 "--color": {
15345                         "help":"enable or disable color output",
15346                         "type":"choice",
15347                         "choices":("y", "n")
15348                 },
15349
15350                 "--deselect": {
15351                         "help"    : "remove atoms from the world file",
15352                         "type"    : "choice",
15353                         "choices" : ("True", "n")
15354                 },
15355
15356                 "--jobs": {
15357
15358                         "help"   : "Specifies the number of packages to build " + \
15359                                 "simultaneously.",
15360
15361                         "action" : "store"
15362                 },
15363
15364                 "--load-average": {
15365
15366                         "help"   :"Specifies that no new builds should be started " + \
15367                                 "if there are other builds running and the load average " + \
15368                                 "is at least LOAD (a floating-point number).",
15369
15370                         "action" : "store"
15371                 },
15372
15373                 "--with-bdeps": {
15374                         "help":"include unnecessary build time dependencies",
15375                         "type":"choice",
15376                         "choices":("y", "n")
15377                 },
15378                 "--reinstall": {
15379                         "help":"specify conditions to trigger package reinstallation",
15380                         "type":"choice",
15381                         "choices":["changed-use"]
15382                 },
15383                 "--root": {
15384                  "help"   : "specify the target root filesystem for merging packages",
15385                  "action" : "store"
15386                 },
15387
15388                 "--root-deps": {
15389                         "help"    : "modify interpretation of depedencies",
15390                         "type"    : "choice",
15391                         "choices" :("True", "rdeps")
15392                 },
15393         }
15394
15395         from optparse import OptionParser
15396         parser = OptionParser()
15397         if parser.has_option("--help"):
15398                 parser.remove_option("--help")
15399
15400         for action_opt in actions:
15401                 parser.add_option("--" + action_opt, action="store_true",
15402                         dest=action_opt.replace("-", "_"), default=False)
15403         for myopt in options:
15404                 parser.add_option(myopt, action="store_true",
15405                         dest=myopt.lstrip("--").replace("-", "_"), default=False)
15406         for shortopt, longopt in shortmapping.iteritems():
15407                 parser.add_option("-" + shortopt, action="store_true",
15408                         dest=longopt.lstrip("--").replace("-", "_"), default=False)
15409         for myalias, myopt in longopt_aliases.iteritems():
15410                 parser.add_option(myalias, action="store_true",
15411                         dest=myopt.lstrip("--").replace("-", "_"), default=False)
15412
15413         for myopt, kwargs in argument_options.iteritems():
15414                 parser.add_option(myopt,
15415                         dest=myopt.lstrip("--").replace("-", "_"), **kwargs)
15416
15417         tmpcmdline = insert_optional_args(tmpcmdline)
15418
15419         myoptions, myargs = parser.parse_args(args=tmpcmdline)
15420
15421         if myoptions.deselect == "True":
15422                 myoptions.deselect = True
15423
15424         if myoptions.root_deps == "True":
15425                 myoptions.root_deps = True
15426
15427         if myoptions.jobs:
15428                 jobs = None
15429                 if myoptions.jobs == "True":
15430                         jobs = True
15431                 else:
15432                         try:
15433                                 jobs = int(myoptions.jobs)
15434                         except ValueError:
15435                                 jobs = -1
15436
15437                 if jobs is not True and \
15438                         jobs < 1:
15439                         jobs = None
15440                         if not silent:
15441                                 writemsg("!!! Invalid --jobs parameter: '%s'\n" % \
15442                                         (myoptions.jobs,), noiselevel=-1)
15443
15444                 myoptions.jobs = jobs
15445
15446         if myoptions.load_average:
15447                 try:
15448                         load_average = float(myoptions.load_average)
15449                 except ValueError:
15450                         load_average = 0.0
15451
15452                 if load_average <= 0.0:
15453                         load_average = None
15454                         if not silent:
15455                                 writemsg("!!! Invalid --load-average parameter: '%s'\n" % \
15456                                         (myoptions.load_average,), noiselevel=-1)
15457
15458                 myoptions.load_average = load_average
15459
15460         for myopt in options:
15461                 v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"))
15462                 if v:
15463                         myopts[myopt] = True
15464
15465         for myopt in argument_options:
15466                 v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"), None)
15467                 if v is not None:
15468                         myopts[myopt] = v
15469
15470         if myoptions.searchdesc:
15471                 myoptions.search = True
15472
15473         for action_opt in actions:
15474                 v = getattr(myoptions, action_opt.replace("-", "_"))
15475                 if v:
15476                         if myaction:
15477                                 multiple_actions(myaction, action_opt)
15478                                 sys.exit(1)
15479                         myaction = action_opt
15480
15481         if myaction is None and myoptions.deselect is True:
15482                 myaction = 'deselect'
15483
15484         myfiles += myargs
15485
15486         return myaction, myopts, myfiles
15487
15488 def validate_ebuild_environment(trees):
15489         for myroot in trees:
15490                 settings = trees[myroot]["vartree"].settings
15491                 settings.validate()
15492
15493 def clear_caches(trees):
15494         for d in trees.itervalues():
15495                 d["porttree"].dbapi.melt()
15496                 d["porttree"].dbapi._aux_cache.clear()
15497                 d["bintree"].dbapi._aux_cache.clear()
15498                 d["bintree"].dbapi._clear_cache()
15499                 d["vartree"].dbapi.linkmap._clear_cache()
15500         portage.dircache.clear()
15501         gc.collect()
15502
15503 def load_emerge_config(trees=None):
15504         kwargs = {}
15505         for k, envvar in (("config_root", "PORTAGE_CONFIGROOT"), ("target_root", "ROOT")):
15506                 v = os.environ.get(envvar, None)
15507                 if v and v.strip():
15508                         kwargs[k] = v
15509         trees = portage.create_trees(trees=trees, **kwargs)
15510
15511         for root, root_trees in trees.iteritems():
15512                 settings = root_trees["vartree"].settings
15513                 setconfig = load_default_config(settings, root_trees)
15514                 root_trees["root_config"] = RootConfig(settings, root_trees, setconfig)
15515
15516         settings = trees["/"]["vartree"].settings
15517
15518         for myroot in trees:
15519                 if myroot != "/":
15520                         settings = trees[myroot]["vartree"].settings
15521                         break
15522
15523         mtimedbfile = os.path.join("/", portage.CACHE_PATH.lstrip(os.path.sep), "mtimedb")
15524         mtimedb = portage.MtimeDB(mtimedbfile)
15525         
15526         return settings, trees, mtimedb
15527
15528 def adjust_config(myopts, settings):
15529         """Make emerge specific adjustments to the config."""
15530
15531         # To enhance usability, make some vars case insensitive by forcing them to
15532         # lower case.
15533         for myvar in ("AUTOCLEAN", "NOCOLOR"):
15534                 if myvar in settings:
15535                         settings[myvar] = settings[myvar].lower()
15536                         settings.backup_changes(myvar)
15537         del myvar
15538
15539         # Kill noauto as it will break merges otherwise.
15540         if "noauto" in settings.features:
15541                 settings.features.remove('noauto')
15542                 settings['FEATURES'] = ' '.join(sorted(settings.features))
15543                 settings.backup_changes("FEATURES")
15544
15545         CLEAN_DELAY = 5
15546         try:
15547                 CLEAN_DELAY = int(settings.get("CLEAN_DELAY", str(CLEAN_DELAY)))
15548         except ValueError, e:
15549                 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
15550                 portage.writemsg("!!! Unable to parse integer: CLEAN_DELAY='%s'\n" % \
15551                         settings["CLEAN_DELAY"], noiselevel=-1)
15552         settings["CLEAN_DELAY"] = str(CLEAN_DELAY)
15553         settings.backup_changes("CLEAN_DELAY")
15554
15555         EMERGE_WARNING_DELAY = 10
15556         try:
15557                 EMERGE_WARNING_DELAY = int(settings.get(
15558                         "EMERGE_WARNING_DELAY", str(EMERGE_WARNING_DELAY)))
15559         except ValueError, e:
15560                 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
15561                 portage.writemsg("!!! Unable to parse integer: EMERGE_WARNING_DELAY='%s'\n" % \
15562                         settings["EMERGE_WARNING_DELAY"], noiselevel=-1)
15563         settings["EMERGE_WARNING_DELAY"] = str(EMERGE_WARNING_DELAY)
15564         settings.backup_changes("EMERGE_WARNING_DELAY")
15565
15566         if "--quiet" in myopts:
15567                 settings["PORTAGE_QUIET"]="1"
15568                 settings.backup_changes("PORTAGE_QUIET")
15569
15570         if "--verbose" in myopts:
15571                 settings["PORTAGE_VERBOSE"] = "1"
15572                 settings.backup_changes("PORTAGE_VERBOSE")
15573
15574         # Set so that configs will be merged regardless of remembered status
15575         if ("--noconfmem" in myopts):
15576                 settings["NOCONFMEM"]="1"
15577                 settings.backup_changes("NOCONFMEM")
15578
15579         # Set various debug markers... They should be merged somehow.
15580         PORTAGE_DEBUG = 0
15581         try:
15582                 PORTAGE_DEBUG = int(settings.get("PORTAGE_DEBUG", str(PORTAGE_DEBUG)))
15583                 if PORTAGE_DEBUG not in (0, 1):
15584                         portage.writemsg("!!! Invalid value: PORTAGE_DEBUG='%i'\n" % \
15585                                 PORTAGE_DEBUG, noiselevel=-1)
15586                         portage.writemsg("!!! PORTAGE_DEBUG must be either 0 or 1\n",
15587                                 noiselevel=-1)
15588                         PORTAGE_DEBUG = 0
15589         except ValueError, e:
15590                 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
15591                 portage.writemsg("!!! Unable to parse integer: PORTAGE_DEBUG='%s'\n" %\
15592                         settings["PORTAGE_DEBUG"], noiselevel=-1)
15593                 del e
15594         if "--debug" in myopts:
15595                 PORTAGE_DEBUG = 1
15596         settings["PORTAGE_DEBUG"] = str(PORTAGE_DEBUG)
15597         settings.backup_changes("PORTAGE_DEBUG")
15598
15599         if settings.get("NOCOLOR") not in ("yes","true"):
15600                 portage.output.havecolor = 1
15601
15602         """The explicit --color < y | n > option overrides the NOCOLOR environment
15603         variable and stdout auto-detection."""
15604         if "--color" in myopts:
15605                 if "y" == myopts["--color"]:
15606                         portage.output.havecolor = 1
15607                         settings["NOCOLOR"] = "false"
15608                 else:
15609                         portage.output.havecolor = 0
15610                         settings["NOCOLOR"] = "true"
15611                 settings.backup_changes("NOCOLOR")
15612         elif not sys.stdout.isatty() and settings.get("NOCOLOR") != "no":
15613                 portage.output.havecolor = 0
15614                 settings["NOCOLOR"] = "true"
15615                 settings.backup_changes("NOCOLOR")
15616
15617 def apply_priorities(settings):
15618         ionice(settings)
15619         nice(settings)
15620
15621 def nice(settings):
15622         try:
15623                 os.nice(int(settings.get("PORTAGE_NICENESS", "0")))
15624         except (OSError, ValueError), e:
15625                 out = portage.output.EOutput()
15626                 out.eerror("Failed to change nice value to '%s'" % \
15627                         settings["PORTAGE_NICENESS"])
15628                 out.eerror("%s\n" % str(e))
15629
15630 def ionice(settings):
15631
15632         ionice_cmd = settings.get("PORTAGE_IONICE_COMMAND")
15633         if ionice_cmd:
15634                 ionice_cmd = shlex.split(ionice_cmd)
15635         if not ionice_cmd:
15636                 return
15637
15638         from portage.util import varexpand
15639         variables = {"PID" : str(os.getpid())}
15640         cmd = [varexpand(x, mydict=variables) for x in ionice_cmd]
15641
15642         try:
15643                 rval = portage.process.spawn(cmd, env=os.environ)
15644         except portage.exception.CommandNotFound:
15645                 # The OS kernel probably doesn't support ionice,
15646                 # so return silently.
15647                 return
15648
15649         if rval != os.EX_OK:
15650                 out = portage.output.EOutput()
15651                 out.eerror("PORTAGE_IONICE_COMMAND returned %d" % (rval,))
15652                 out.eerror("See the make.conf(5) man page for PORTAGE_IONICE_COMMAND usage instructions.")
15653
15654 def display_missing_pkg_set(root_config, set_name):
15655
15656         msg = []
15657         msg.append(("emerge: There are no sets to satisfy '%s'. " + \
15658                 "The following sets exist:") % \
15659                 colorize("INFORM", set_name))
15660         msg.append("")
15661
15662         for s in sorted(root_config.sets):
15663                 msg.append("    %s" % s)
15664         msg.append("")
15665
15666         writemsg_level("".join("%s\n" % l for l in msg),
15667                 level=logging.ERROR, noiselevel=-1)
15668
15669 def expand_set_arguments(myfiles, myaction, root_config):
15670         retval = os.EX_OK
15671         setconfig = root_config.setconfig
15672
15673         sets = setconfig.getSets()
15674
15675         # In order to know exactly which atoms/sets should be added to the
15676         # world file, the depgraph performs set expansion later. It will get
15677         # confused about where the atoms came from if it's not allowed to
15678         # expand them itself.
15679         do_not_expand = (None, )
15680         newargs = []
15681         for a in myfiles:
15682                 if a in ("system", "world"):
15683                         newargs.append(SETPREFIX+a)
15684                 else:
15685                         newargs.append(a)
15686         myfiles = newargs
15687         del newargs
15688         newargs = []
15689
15690         # separators for set arguments
15691         ARG_START = "{"
15692         ARG_END = "}"
15693
15694         # WARNING: all operators must be of equal length
15695         IS_OPERATOR = "/@"
15696         DIFF_OPERATOR = "-@"
15697         UNION_OPERATOR = "+@"
15698         
15699         for i in range(0, len(myfiles)):
15700                 if myfiles[i].startswith(SETPREFIX):
15701                         start = 0
15702                         end = 0
15703                         x = myfiles[i][len(SETPREFIX):]
15704                         newset = ""
15705                         while x:
15706                                 start = x.find(ARG_START)
15707                                 end = x.find(ARG_END)
15708                                 if start > 0 and start < end:
15709                                         namepart = x[:start]
15710                                         argpart = x[start+1:end]
15711                                 
15712                                         # TODO: implement proper quoting
15713                                         args = argpart.split(",")
15714                                         options = {}
15715                                         for a in args:
15716                                                 if "=" in a:
15717                                                         k, v  = a.split("=", 1)
15718                                                         options[k] = v
15719                                                 else:
15720                                                         options[a] = "True"
15721                                         setconfig.update(namepart, options)
15722                                         newset += (x[:start-len(namepart)]+namepart)
15723                                         x = x[end+len(ARG_END):]
15724                                 else:
15725                                         newset += x
15726                                         x = ""
15727                         myfiles[i] = SETPREFIX+newset
15728                                 
15729         sets = setconfig.getSets()
15730
15731         # display errors that occured while loading the SetConfig instance
15732         for e in setconfig.errors:
15733                 print colorize("BAD", "Error during set creation: %s" % e)
15734         
15735         # emerge relies on the existance of sets with names "world" and "system"
15736         required_sets = ("world", "system")
15737         missing_sets = []
15738
15739         for s in required_sets:
15740                 if s not in sets:
15741                         missing_sets.append(s)
15742         if missing_sets:
15743                 if len(missing_sets) > 2:
15744                         missing_sets_str = ", ".join('"%s"' % s for s in missing_sets[:-1])
15745                         missing_sets_str += ', and "%s"' % missing_sets[-1]
15746                 elif len(missing_sets) == 2:
15747                         missing_sets_str = '"%s" and "%s"' % tuple(missing_sets)
15748                 else:
15749                         missing_sets_str = '"%s"' % missing_sets[-1]
15750                 msg = ["emerge: incomplete set configuration, " + \
15751                         "missing set(s): %s" % missing_sets_str]
15752                 if sets:
15753                         msg.append("        sets defined: %s" % ", ".join(sets))
15754                 msg.append("        This usually means that '%s'" % \
15755                         (os.path.join(portage.const.GLOBAL_CONFIG_PATH, "sets.conf"),))
15756                 msg.append("        is missing or corrupt.")
15757                 for line in msg:
15758                         writemsg_level(line + "\n", level=logging.ERROR, noiselevel=-1)
15759                 return (None, 1)
15760         unmerge_actions = ("unmerge", "prune", "clean", "depclean")
15761
15762         for a in myfiles:
15763                 if a.startswith(SETPREFIX):
15764                         # support simple set operations (intersection, difference and union)
15765                         # on the commandline. Expressions are evaluated strictly left-to-right
15766                         if IS_OPERATOR in a or DIFF_OPERATOR in a or UNION_OPERATOR in a:
15767                                 expression = a[len(SETPREFIX):]
15768                                 expr_sets = []
15769                                 expr_ops = []
15770                                 while IS_OPERATOR in expression or DIFF_OPERATOR in expression or UNION_OPERATOR in expression:
15771                                         is_pos = expression.rfind(IS_OPERATOR)
15772                                         diff_pos = expression.rfind(DIFF_OPERATOR)
15773                                         union_pos = expression.rfind(UNION_OPERATOR)
15774                                         op_pos = max(is_pos, diff_pos, union_pos)
15775                                         s1 = expression[:op_pos]
15776                                         s2 = expression[op_pos+len(IS_OPERATOR):]
15777                                         op = expression[op_pos:op_pos+len(IS_OPERATOR)]
15778                                         if not s2 in sets:
15779                                                 display_missing_pkg_set(root_config, s2)
15780                                                 return (None, 1)
15781                                         expr_sets.insert(0, s2)
15782                                         expr_ops.insert(0, op)
15783                                         expression = s1
15784                                 if not expression in sets:
15785                                         display_missing_pkg_set(root_config, expression)
15786                                         return (None, 1)
15787                                 expr_sets.insert(0, expression)
15788                                 result = set(setconfig.getSetAtoms(expression))
15789                                 for i in range(0, len(expr_ops)):
15790                                         s2 = setconfig.getSetAtoms(expr_sets[i+1])
15791                                         if expr_ops[i] == IS_OPERATOR:
15792                                                 result.intersection_update(s2)
15793                                         elif expr_ops[i] == DIFF_OPERATOR:
15794                                                 result.difference_update(s2)
15795                                         elif expr_ops[i] == UNION_OPERATOR:
15796                                                 result.update(s2)
15797                                         else:
15798                                                 raise NotImplementedError("unknown set operator %s" % expr_ops[i])
15799                                 newargs.extend(result)
15800                         else:                   
15801                                 s = a[len(SETPREFIX):]
15802                                 if s not in sets:
15803                                         display_missing_pkg_set(root_config, s)
15804                                         return (None, 1)
15805                                 setconfig.active.append(s)
15806                                 try:
15807                                         set_atoms = setconfig.getSetAtoms(s)
15808                                 except portage.exception.PackageSetNotFound, e:
15809                                         writemsg_level(("emerge: the given set '%s' " + \
15810                                                 "contains a non-existent set named '%s'.\n") % \
15811                                                 (s, e), level=logging.ERROR, noiselevel=-1)
15812                                         return (None, 1)
15813                                 if myaction in unmerge_actions and \
15814                                                 not sets[s].supportsOperation("unmerge"):
15815                                         sys.stderr.write("emerge: the given set '%s' does " % s + \
15816                                                 "not support unmerge operations\n")
15817                                         retval = 1
15818                                 elif not set_atoms:
15819                                         print "emerge: '%s' is an empty set" % s
15820                                 elif myaction not in do_not_expand:
15821                                         newargs.extend(set_atoms)
15822                                 else:
15823                                         newargs.append(SETPREFIX+s)
15824                                 for e in sets[s].errors:
15825                                         print e
15826                 else:
15827                         newargs.append(a)
15828         return (newargs, retval)
15829
15830 def repo_name_check(trees):
15831         missing_repo_names = set()
15832         for root, root_trees in trees.iteritems():
15833                 if "porttree" in root_trees:
15834                         portdb = root_trees["porttree"].dbapi
15835                         missing_repo_names.update(portdb.porttrees)
15836                         repos = portdb.getRepositories()
15837                         for r in repos:
15838                                 missing_repo_names.discard(portdb.getRepositoryPath(r))
15839                         if portdb.porttree_root in missing_repo_names and \
15840                                 not os.path.exists(os.path.join(
15841                                 portdb.porttree_root, "profiles")):
15842                                 # This is normal if $PORTDIR happens to be empty,
15843                                 # so don't warn about it.
15844                                 missing_repo_names.remove(portdb.porttree_root)
15845
15846         if missing_repo_names:
15847                 msg = []
15848                 msg.append("WARNING: One or more repositories " + \
15849                         "have missing repo_name entries:")
15850                 msg.append("")
15851                 for p in missing_repo_names:
15852                         msg.append("\t%s/profiles/repo_name" % (p,))
15853                 msg.append("")
15854                 msg.extend(textwrap.wrap("NOTE: Each repo_name entry " + \
15855                         "should be a plain text file containing a unique " + \
15856                         "name for the repository on the first line.", 70))
15857                 writemsg_level("".join("%s\n" % l for l in msg),
15858                         level=logging.WARNING, noiselevel=-1)
15859
15860         return bool(missing_repo_names)
15861
15862 def repo_name_duplicate_check(trees):
15863         ignored_repos = {}
15864         for root, root_trees in trees.iteritems():
15865                 if 'porttree' in root_trees:
15866                         portdb = root_trees['porttree'].dbapi
15867                         if portdb.mysettings.get('PORTAGE_REPO_DUPLICATE_WARN') != '0':
15868                                 for repo_name, paths in portdb._ignored_repos:
15869                                         k = (root, repo_name, portdb.getRepositoryPath(repo_name))
15870                                         ignored_repos.setdefault(k, []).extend(paths)
15871
15872         if ignored_repos:
15873                 msg = []
15874                 msg.append('WARNING: One or more repositories ' + \
15875                         'have been ignored due to duplicate')
15876                 msg.append('  profiles/repo_name entries:')
15877                 msg.append('')
15878                 for k in sorted(ignored_repos):
15879                         msg.append('  %s overrides' % (k,))
15880                         for path in ignored_repos[k]:
15881                                 msg.append('    %s' % (path,))
15882                         msg.append('')
15883                 msg.extend('  ' + x for x in textwrap.wrap(
15884                         "All profiles/repo_name entries must be unique in order " + \
15885                         "to avoid having duplicates ignored. " + \
15886                         "Set PORTAGE_REPO_DUPLICATE_WARN=\"0\" in " + \
15887                         "/etc/make.conf if you would like to disable this warning."))
15888                 writemsg_level(''.join('%s\n' % l for l in msg),
15889                         level=logging.WARNING, noiselevel=-1)
15890
15891         return bool(ignored_repos)
15892
15893 def config_protect_check(trees):
15894         for root, root_trees in trees.iteritems():
15895                 if not root_trees["root_config"].settings.get("CONFIG_PROTECT"):
15896                         msg = "!!! CONFIG_PROTECT is empty"
15897                         if root != "/":
15898                                 msg += " for '%s'" % root
15899                         writemsg_level(msg, level=logging.WARN, noiselevel=-1)
15900
15901 def ambiguous_package_name(arg, atoms, root_config, spinner, myopts):
15902
15903         if "--quiet" in myopts:
15904                 print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
15905                 print "!!! one of the following fully-qualified ebuild names instead:\n"
15906                 for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
15907                         print "    " + colorize("INFORM", cp)
15908                 return
15909
15910         s = search(root_config, spinner, "--searchdesc" in myopts,
15911                 "--quiet" not in myopts, "--usepkg" in myopts,
15912                 "--usepkgonly" in myopts)
15913         null_cp = portage.dep_getkey(insert_category_into_atom(
15914                 arg, "null"))
15915         cat, atom_pn = portage.catsplit(null_cp)
15916         s.searchkey = atom_pn
15917         for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
15918                 s.addCP(cp)
15919         s.output()
15920         print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
15921         print "!!! one of the above fully-qualified ebuild names instead.\n"
15922
15923 def profile_check(trees, myaction, myopts):
15924         if myaction in ("info", "sync"):
15925                 return os.EX_OK
15926         elif "--version" in myopts or "--help" in myopts:
15927                 return os.EX_OK
15928         for root, root_trees in trees.iteritems():
15929                 if root_trees["root_config"].settings.profiles:
15930                         continue
15931                 # generate some profile related warning messages
15932                 validate_ebuild_environment(trees)
15933                 msg = "If you have just changed your profile configuration, you " + \
15934                         "should revert back to the previous configuration. Due to " + \
15935                         "your current profile being invalid, allowed actions are " + \
15936                         "limited to --help, --info, --sync, and --version."
15937                 writemsg_level("".join("!!! %s\n" % l for l in textwrap.wrap(msg, 70)),
15938                         level=logging.ERROR, noiselevel=-1)
15939                 return 1
15940         return os.EX_OK
15941
15942 def emerge_main():
15943         global portage  # NFC why this is necessary now - genone
15944         portage._disable_legacy_globals()
15945         # Disable color until we're sure that it should be enabled (after
15946         # EMERGE_DEFAULT_OPTS has been parsed).
15947         portage.output.havecolor = 0
15948         # This first pass is just for options that need to be known as early as
15949         # possible, such as --config-root.  They will be parsed again later,
15950         # together with EMERGE_DEFAULT_OPTS (which may vary depending on the
15951         # the value of --config-root).
15952         myaction, myopts, myfiles = parse_opts(sys.argv[1:], silent=True)
15953         if "--debug" in myopts:
15954                 os.environ["PORTAGE_DEBUG"] = "1"
15955         if "--config-root" in myopts:
15956                 os.environ["PORTAGE_CONFIGROOT"] = myopts["--config-root"]
15957         if "--root" in myopts:
15958                 os.environ["ROOT"] = myopts["--root"]
15959
15960         # Portage needs to ensure a sane umask for the files it creates.
15961         os.umask(022)
15962         settings, trees, mtimedb = load_emerge_config()
15963         portdb = trees[settings["ROOT"]]["porttree"].dbapi
15964         rval = profile_check(trees, myaction, myopts)
15965         if rval != os.EX_OK:
15966                 return rval
15967
15968         if portage._global_updates(trees, mtimedb["updates"]):
15969                 mtimedb.commit()
15970                 # Reload the whole config from scratch.
15971                 settings, trees, mtimedb = load_emerge_config(trees=trees)
15972                 portdb = trees[settings["ROOT"]]["porttree"].dbapi
15973
15974         xterm_titles = "notitles" not in settings.features
15975
15976         tmpcmdline = []
15977         if "--ignore-default-opts" not in myopts:
15978                 tmpcmdline.extend(settings["EMERGE_DEFAULT_OPTS"].split())
15979         tmpcmdline.extend(sys.argv[1:])
15980         myaction, myopts, myfiles = parse_opts(tmpcmdline)
15981
15982         if "--digest" in myopts:
15983                 os.environ["FEATURES"] = os.environ.get("FEATURES","") + " digest"
15984                 # Reload the whole config from scratch so that the portdbapi internal
15985                 # config is updated with new FEATURES.
15986                 settings, trees, mtimedb = load_emerge_config(trees=trees)
15987                 portdb = trees[settings["ROOT"]]["porttree"].dbapi
15988
15989         for myroot in trees:
15990                 mysettings =  trees[myroot]["vartree"].settings
15991                 mysettings.unlock()
15992                 adjust_config(myopts, mysettings)
15993                 if '--pretend' not in myopts and myaction in \
15994                         (None, 'clean', 'depclean', 'prune', 'unmerge'):
15995                         mysettings["PORTAGE_COUNTER_HASH"] = \
15996                                 trees[myroot]["vartree"].dbapi._counter_hash()
15997                         mysettings.backup_changes("PORTAGE_COUNTER_HASH")
15998                 mysettings.lock()
15999                 del myroot, mysettings
16000
16001         apply_priorities(settings)
16002
16003         spinner = stdout_spinner()
16004         if "candy" in settings.features:
16005                 spinner.update = spinner.update_scroll
16006
16007         if "--quiet" not in myopts:
16008                 portage.deprecated_profile_check(settings=settings)
16009                 repo_name_check(trees)
16010                 repo_name_duplicate_check(trees)
16011                 config_protect_check(trees)
16012
16013         for mytrees in trees.itervalues():
16014                 mydb = mytrees["porttree"].dbapi
16015                 # Freeze the portdbapi for performance (memoize all xmatch results).
16016                 mydb.freeze()
16017         del mytrees, mydb
16018
16019         if "moo" in myfiles:
16020                 print """
16021
16022   Larry loves Gentoo (""" + platform.system() + """)
16023
16024  _______________________
16025 < Have you mooed today? >
16026  -----------------------
16027         \   ^__^
16028          \  (oo)\_______
16029             (__)\       )\/\ 
16030                 ||----w |
16031                 ||     ||
16032
16033 """
16034
16035         for x in myfiles:
16036                 ext = os.path.splitext(x)[1]
16037                 if (ext == ".ebuild" or ext == ".tbz2") and os.path.exists(os.path.abspath(x)):
16038                         print colorize("BAD", "\n*** emerging by path is broken and may not always work!!!\n")
16039                         break
16040
16041         root_config = trees[settings["ROOT"]]["root_config"]
16042         if myaction == "list-sets":
16043                 sys.stdout.write("".join("%s\n" % s for s in sorted(root_config.sets)))
16044                 sys.stdout.flush()
16045                 return os.EX_OK
16046
16047         # only expand sets for actions taking package arguments
16048         oldargs = myfiles[:]
16049         if myaction in ("clean", "config", "depclean", "info", "prune", "unmerge", None):
16050                 myfiles, retval = expand_set_arguments(myfiles, myaction, root_config)
16051                 if retval != os.EX_OK:
16052                         return retval
16053
16054                 # Need to handle empty sets specially, otherwise emerge will react 
16055                 # with the help message for empty argument lists
16056                 if oldargs and not myfiles:
16057                         print "emerge: no targets left after set expansion"
16058                         return 0
16059
16060         if ("--tree" in myopts) and ("--columns" in myopts):
16061                 print "emerge: can't specify both of \"--tree\" and \"--columns\"."
16062                 return 1
16063
16064         if ("--quiet" in myopts):
16065                 spinner.update = spinner.update_quiet
16066                 portage.util.noiselimit = -1
16067
16068         # Always create packages if FEATURES=buildpkg
16069         # Imply --buildpkg if --buildpkgonly
16070         if ("buildpkg" in settings.features) or ("--buildpkgonly" in myopts):
16071                 if "--buildpkg" not in myopts:
16072                         myopts["--buildpkg"] = True
16073
16074         # Always try and fetch binary packages if FEATURES=getbinpkg
16075         if ("getbinpkg" in settings.features):
16076                 myopts["--getbinpkg"] = True
16077
16078         if "--buildpkgonly" in myopts:
16079                 # --buildpkgonly will not merge anything, so
16080                 # it cancels all binary package options.
16081                 for opt in ("--getbinpkg", "--getbinpkgonly",
16082                         "--usepkg", "--usepkgonly"):
16083                         myopts.pop(opt, None)
16084
16085         if "--fetch-all-uri" in myopts:
16086                 myopts["--fetchonly"] = True
16087
16088         if "--skipfirst" in myopts and "--resume" not in myopts:
16089                 myopts["--resume"] = True
16090
16091         if ("--getbinpkgonly" in myopts) and not ("--usepkgonly" in myopts):
16092                 myopts["--usepkgonly"] = True
16093
16094         if ("--getbinpkgonly" in myopts) and not ("--getbinpkg" in myopts):
16095                 myopts["--getbinpkg"] = True
16096
16097         if ("--getbinpkg" in myopts) and not ("--usepkg" in myopts):
16098                 myopts["--usepkg"] = True
16099
16100         # Also allow -K to apply --usepkg/-k
16101         if ("--usepkgonly" in myopts) and not ("--usepkg" in myopts):
16102                 myopts["--usepkg"] = True
16103
16104         # Allow -p to remove --ask
16105         if ("--pretend" in myopts) and ("--ask" in myopts):
16106                 print ">>> --pretend disables --ask... removing --ask from options."
16107                 del myopts["--ask"]
16108
16109         # forbid --ask when not in a terminal
16110         # note: this breaks `emerge --ask | tee logfile`, but that doesn't work anyway.
16111         if ("--ask" in myopts) and (not sys.stdin.isatty()):
16112                 portage.writemsg("!!! \"--ask\" should only be used in a terminal. Exiting.\n",
16113                         noiselevel=-1)
16114                 return 1
16115
16116         if settings.get("PORTAGE_DEBUG", "") == "1":
16117                 spinner.update = spinner.update_quiet
16118                 portage.debug=1
16119                 if "python-trace" in settings.features:
16120                         import portage.debug
16121                         portage.debug.set_trace(True)
16122
16123         if not ("--quiet" in myopts):
16124                 if not sys.stdout.isatty() or ("--nospinner" in myopts):
16125                         spinner.update = spinner.update_basic
16126
16127         if myaction == 'version':
16128                 print getportageversion(settings["PORTDIR"], settings["ROOT"],
16129                         settings.profile_path, settings["CHOST"],
16130                         trees[settings["ROOT"]]["vartree"].dbapi)
16131                 return 0
16132         elif "--help" in myopts:
16133                 _emerge.help.help(myaction, myopts, portage.output.havecolor)
16134                 return 0
16135
16136         if "--debug" in myopts:
16137                 print "myaction", myaction
16138                 print "myopts", myopts
16139
16140         if not myaction and not myfiles and "--resume" not in myopts:
16141                 _emerge.help.help(myaction, myopts, portage.output.havecolor)
16142                 return 1
16143
16144         pretend = "--pretend" in myopts
16145         fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
16146         buildpkgonly = "--buildpkgonly" in myopts
16147
16148         # check if root user is the current user for the actions where emerge needs this
16149         if portage.secpass < 2:
16150                 # We've already allowed "--version" and "--help" above.
16151                 if "--pretend" not in myopts and myaction not in ("search","info"):
16152                         need_superuser = myaction in ('clean', 'depclean', 'deselect',
16153                                 'prune', 'unmerge') or not \
16154                                 (fetchonly or \
16155                                 (buildpkgonly and secpass >= 1) or \
16156                                 myaction in ("metadata", "regen") or \
16157                                 (myaction == "sync" and os.access(settings["PORTDIR"], os.W_OK)))
16158                         if portage.secpass < 1 or \
16159                                 need_superuser:
16160                                 if need_superuser:
16161                                         access_desc = "superuser"
16162                                 else:
16163                                         access_desc = "portage group"
16164                                 # Always show portage_group_warning() when only portage group
16165                                 # access is required but the user is not in the portage group.
16166                                 from portage.data import portage_group_warning
16167                                 if "--ask" in myopts:
16168                                         myopts["--pretend"] = True
16169                                         del myopts["--ask"]
16170                                         print ("%s access is required... " + \
16171                                                 "adding --pretend to options.\n") % access_desc
16172                                         if portage.secpass < 1 and not need_superuser:
16173                                                 portage_group_warning()
16174                                 else:
16175                                         sys.stderr.write(("emerge: %s access is " + \
16176                                                 "required.\n\n") % access_desc)
16177                                         if portage.secpass < 1 and not need_superuser:
16178                                                 portage_group_warning()
16179                                         return 1
16180
16181         disable_emergelog = False
16182         for x in ("--pretend", "--fetchonly", "--fetch-all-uri"):
16183                 if x in myopts:
16184                         disable_emergelog = True
16185                         break
16186         if myaction in ("search", "info"):
16187                 disable_emergelog = True
16188         if disable_emergelog:
16189                 """ Disable emergelog for everything except build or unmerge
16190                 operations.  This helps minimize parallel emerge.log entries that can
16191                 confuse log parsers.  We especially want it disabled during
16192                 parallel-fetch, which uses --resume --fetchonly."""
16193                 global emergelog
16194                 def emergelog(*pargs, **kargs):
16195                         pass
16196
16197         if not "--pretend" in myopts:
16198                 emergelog(xterm_titles, "Started emerge on: "+\
16199                         time.strftime("%b %d, %Y %H:%M:%S", time.localtime()))
16200                 myelogstr=""
16201                 if myopts:
16202                         myelogstr=" ".join(myopts)
16203                 if myaction:
16204                         myelogstr+=" "+myaction
16205                 if myfiles:
16206                         myelogstr += " " + " ".join(oldargs)
16207                 emergelog(xterm_titles, " *** emerge " + myelogstr)
16208         del oldargs
16209
16210         def emergeexitsig(signum, frame):
16211                 signal.signal(signal.SIGINT, signal.SIG_IGN)
16212                 signal.signal(signal.SIGTERM, signal.SIG_IGN)
16213                 portage.util.writemsg("\n\nExiting on signal %(signal)s\n" % {"signal":signum})
16214                 sys.exit(100+signum)
16215         signal.signal(signal.SIGINT, emergeexitsig)
16216         signal.signal(signal.SIGTERM, emergeexitsig)
16217
16218         def emergeexit():
16219                 """This gets out final log message in before we quit."""
16220                 if "--pretend" not in myopts:
16221                         emergelog(xterm_titles, " *** terminating.")
16222                 if "notitles" not in settings.features:
16223                         xtermTitleReset()
16224         portage.atexit_register(emergeexit)
16225
16226         if myaction in ("config", "metadata", "regen", "sync"):
16227                 if "--pretend" in myopts:
16228                         sys.stderr.write(("emerge: The '%s' action does " + \
16229                                 "not support '--pretend'.\n") % myaction)
16230                         return 1
16231
16232         if "sync" == myaction:
16233                 return action_sync(settings, trees, mtimedb, myopts, myaction)
16234         elif "metadata" == myaction:
16235                 action_metadata(settings, portdb, myopts)
16236         elif myaction=="regen":
16237                 validate_ebuild_environment(trees)
16238                 return action_regen(settings, portdb, myopts.get("--jobs"),
16239                         myopts.get("--load-average"))
16240         # HELP action
16241         elif "config"==myaction:
16242                 validate_ebuild_environment(trees)
16243                 action_config(settings, trees, myopts, myfiles)
16244
16245         # SEARCH action
16246         elif "search"==myaction:
16247                 validate_ebuild_environment(trees)
16248                 action_search(trees[settings["ROOT"]]["root_config"],
16249                         myopts, myfiles, spinner)
16250
16251         elif myaction in ('clean', 'depclean', 'deselect', 'prune', 'unmerge'):
16252                 validate_ebuild_environment(trees)
16253                 rval = action_uninstall(settings, trees, mtimedb["ldpath"],
16254                         myopts, myaction, myfiles, spinner)
16255                 if not (myaction == 'deselect' or buildpkgonly or fetchonly or pretend):
16256                         post_emerge(root_config, myopts, mtimedb, rval)
16257                 return rval
16258
16259         elif myaction == 'info':
16260
16261                 # Ensure atoms are valid before calling unmerge().
16262                 vardb = trees[settings["ROOT"]]["vartree"].dbapi
16263                 valid_atoms = []
16264                 for x in myfiles:
16265                         if is_valid_package_atom(x):
16266                                 try:
16267                                         valid_atoms.append(
16268                                                 portage.dep_expand(x, mydb=vardb, settings=settings))
16269                                 except portage.exception.AmbiguousPackageName, e:
16270                                         msg = "The short ebuild name \"" + x + \
16271                                                 "\" is ambiguous.  Please specify " + \
16272                                                 "one of the following " + \
16273                                                 "fully-qualified ebuild names instead:"
16274                                         for line in textwrap.wrap(msg, 70):
16275                                                 writemsg_level("!!! %s\n" % (line,),
16276                                                         level=logging.ERROR, noiselevel=-1)
16277                                         for i in e[0]:
16278                                                 writemsg_level("    %s\n" % colorize("INFORM", i),
16279                                                         level=logging.ERROR, noiselevel=-1)
16280                                         writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
16281                                         return 1
16282                                 continue
16283                         msg = []
16284                         msg.append("'%s' is not a valid package atom." % (x,))
16285                         msg.append("Please check ebuild(5) for full details.")
16286                         writemsg_level("".join("!!! %s\n" % line for line in msg),
16287                                 level=logging.ERROR, noiselevel=-1)
16288                         return 1
16289
16290                 return action_info(settings, trees, myopts, valid_atoms)
16291
16292         # "update", "system", or just process files:
16293         else:
16294                 validate_ebuild_environment(trees)
16295
16296                 for x in myfiles:
16297                         if x.startswith(SETPREFIX) or \
16298                                 is_valid_package_atom(x):
16299                                 continue
16300                         if x[:1] == os.sep:
16301                                 continue
16302                         try:
16303                                 os.lstat(x)
16304                                 continue
16305                         except OSError:
16306                                 pass
16307                         msg = []
16308                         msg.append("'%s' is not a valid package atom." % (x,))
16309                         msg.append("Please check ebuild(5) for full details.")
16310                         writemsg_level("".join("!!! %s\n" % line for line in msg),
16311                                 level=logging.ERROR, noiselevel=-1)
16312                         return 1
16313
16314                 if "--pretend" not in myopts:
16315                         display_news_notification(root_config, myopts)
16316                 retval = action_build(settings, trees, mtimedb,
16317                         myopts, myaction, myfiles, spinner)
16318                 root_config = trees[settings["ROOT"]]["root_config"]
16319                 post_emerge(root_config, myopts, mtimedb, retval)
16320
16321                 return retval