1 #!/usr/bin/python -O
2 # Copyright 1999-2006 Gentoo Foundation
3 # Distributed under the terms of the GNU General Public License v2
4 # $Id$
5
6 import formatter
7 import logging
8 import pwd
9 import select
10 import shlex
11 import signal
12 import sys
13 import textwrap
14 import weakref
15 import gc
16 import os, stat
17 import platform
18
19 try:
20         import portage
21 except ImportError:
22         from os import path as osp
23         sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
24         import portage
25
26 from portage import digraph
27 from portage.const import NEWS_LIB_PATH
28
29 import _emerge.help
30 import portage.xpak, commands, errno, re, socket, time
31 from portage.output import blue, bold, colorize, darkblue, darkgreen, green, \
32         nc_len, red, teal, turquoise, xtermTitle, \
33         xtermTitleReset, yellow
34 from portage.output import create_color_func
35 good = create_color_func("GOOD")
36 bad = create_color_func("BAD")
37 # white looks bad on terminals with white background
38 from portage.output import bold as white
39
40 import portage.elog
41 import portage.dep
42 portage.dep._dep_check_strict = True
43 import portage.util
44 import portage.locks
45 import portage.exception
46 from portage.cache.cache_errors import CacheError
47 from portage.data import secpass
48 from portage.elog.messages import eerror
49 from portage.util import normalize_path as normpath
50 from portage.util import cmp_sort_key, writemsg, writemsg_level
51 from portage.sets import load_default_config, SETPREFIX
52 from portage.sets.base import InternalPackageSet
53
54 from itertools import chain, izip
55
56 from _emerge.SlotObject import SlotObject
57 from _emerge.DepPriority import DepPriority
58 from _emerge.BlockerDepPriority import BlockerDepPriority
59 from _emerge.UnmergeDepPriority import UnmergeDepPriority
60 from _emerge.DepPriorityNormalRange import DepPriorityNormalRange
61 from _emerge.DepPrioritySatisfiedRange import DepPrioritySatisfiedRange
62 from _emerge.Task import Task
63 from _emerge.Blocker import Blocker
64 from _emerge.PollConstants import PollConstants
65 from _emerge.AsynchronousTask import AsynchronousTask
66 from _emerge.CompositeTask import CompositeTask
67 from _emerge.EbuildFetcher import EbuildFetcher
68 from _emerge.EbuildBuild import EbuildBuild
69 from _emerge.EbuildMetadataPhase import EbuildMetadataPhase
70 from _emerge.EbuildPhase import EbuildPhase
71 from _emerge.Binpkg import Binpkg
72 from _emerge.BinpkgPrefetcher import BinpkgPrefetcher
73 from _emerge.PackageMerge import PackageMerge
74 from _emerge.DependencyArg import DependencyArg
75 from _emerge.AtomArg import AtomArg
76 from _emerge.PackageArg import PackageArg
77 from _emerge.SetArg import SetArg
78 from _emerge.Dependency import Dependency
79 from _emerge.BlockerCache import BlockerCache
80 from _emerge.PackageVirtualDbapi import PackageVirtualDbapi
81 from _emerge.RepoDisplay import RepoDisplay
82 from _emerge.UseFlagDisplay import UseFlagDisplay
83 from _emerge.PollSelectAdapter import PollSelectAdapter
84 from _emerge.SequentialTaskQueue import SequentialTaskQueue
85 from _emerge.ProgressHandler import ProgressHandler
86
87 try:
88         from cStringIO import StringIO
89 except ImportError:
90         from StringIO import StringIO
91
92 class stdout_spinner(object):
93         scroll_msgs = [
94                 "Gentoo Rocks ("+platform.system()+")",
95                 "Thank you for using Gentoo. :)",
96                 "Are you actually trying to read this?",
97                 "How many times have you stared at this?",
98                 "We are generating the cache right now",
99                 "You are paying too much attention.",
100                 "A theory is better than its explanation.",
101                 "Phasers locked on target, Captain.",
102                 "Thrashing is just virtual crashing.",
103                 "To be is to program.",
104                 "Real Users hate Real Programmers.",
105                 "When all else fails, read the instructions.",
106                 "Functionality breeds Contempt.",
107                 "The future lies ahead.",
108                 "3.1415926535897932384626433832795028841971694",
109                 "Sometimes insanity is the only alternative.",
110                 "Inaccuracy saves a world of explanation.",
111         ]
112
113         twirl_sequence = "/-\\|/-\\|/-\\|/-\\|\\-/|\\-/|\\-/|\\-/|"
114
115         def __init__(self):
116                 self.spinpos = 0
117                 self.update = self.update_twirl
118                 self.scroll_sequence = self.scroll_msgs[
119                         int(time.time() * 100) % len(self.scroll_msgs)]
120                 self.last_update = 0
121                 self.min_display_latency = 0.05
122
123         def _return_early(self):
124                 """
125                 Flushing output to the tty too frequently wastes CPU time. Therefore,
126                 each update* method should return without doing any output when this
127                 method returns True.
128                 """
129                 cur_time = time.time()
130                 if cur_time - self.last_update < self.min_display_latency:
131                         return True
132                 self.last_update = cur_time
133                 return False
134
135         def update_basic(self):
136                 self.spinpos = (self.spinpos + 1) % 500
137                 if self._return_early():
138                         return
139                 if (self.spinpos % 100) == 0:
140                         if self.spinpos == 0:
141                                 sys.stdout.write(". ")
142                         else:
143                                 sys.stdout.write(".")
144                 sys.stdout.flush()
145
146         def update_scroll(self):
147                 if self._return_early():
148                         return
149                 if self.spinpos >= len(self.scroll_sequence):
150                         sys.stdout.write(darkgreen(" \b\b\b" + self.scroll_sequence[
151                                 len(self.scroll_sequence) - 1 - (self.spinpos % len(self.scroll_sequence))]))
152                 else:
153                         sys.stdout.write(green("\b " + self.scroll_sequence[self.spinpos]))
154                 sys.stdout.flush()
155                 self.spinpos = (self.spinpos + 1) % (2 * len(self.scroll_sequence))
156
157         def update_twirl(self):
158                 self.spinpos = (self.spinpos + 1) % len(self.twirl_sequence)
159                 if self._return_early():
160                         return
161                 sys.stdout.write("\b\b " + self.twirl_sequence[self.spinpos])
162                 sys.stdout.flush()
163
164         def update_quiet(self):
165                 return
166
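# Editorial sketch (hypothetical helper, never called in this module): one way
# a caller might drive stdout_spinner. The spinner throttles its own output via
# min_display_latency, so calling update() in a tight loop is safe. The name
# _example_stdout_spinner and the iteration count are illustrative only.
def _example_stdout_spinner(iterations=50):
        spinner = stdout_spinner()
        # Select the twirl style; update_scroll and update_basic work the same way.
        spinner.update = spinner.update_twirl
        for _i in xrange(iterations):
                spinner.update()
                time.sleep(0.01)
        sys.stdout.write("\n")
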
167 def userquery(prompt, responses=None, colours=None):
168         """Displays a prompt and a set of responses, then waits for user input.
169         The input is checked against the responses and the first match is
170         returned.  An empty response will match the first value in responses.  The
171         input buffer is *not* cleared prior to the prompt!
172
173         prompt: a String.
174         responses: a List of Strings.
175         colours: a List of Functions taking and returning a String, used to
176         process the responses for display. Typically these will be functions
177         like red() but could be e.g. lambda x: "DisplayString".
178         If responses is omitted, defaults to ["Yes", "No"], [green, red].
179         If only colours is omitted, defaults to [bold, ...].
180
181         Returns a member of the List responses. (If called without optional
182         arguments, returns "Yes" or "No".)
183         KeyboardInterrupt is converted to SystemExit to avoid tracebacks being
184         printed."""
185         if responses is None:
186                 responses = ["Yes", "No"]
187                 colours = [
188                         create_color_func("PROMPT_CHOICE_DEFAULT"),
189                         create_color_func("PROMPT_CHOICE_OTHER")
190                 ]
191         elif colours is None:
192                 colours=[bold]
193         colours=(colours*len(responses))[:len(responses)]
194         print bold(prompt),
195         try:
196                 while True:
197                         response=raw_input("["+"/".join([colours[i](responses[i]) for i in range(len(responses))])+"] ")
198                         for key in responses:
199                                 # An empty response will match the first value in responses.
200                                 if response.upper()==key[:len(response)].upper():
201                                         return key
202                         print "Sorry, response '%s' not understood." % response,
203         except (EOFError, KeyboardInterrupt):
204                 print "Interrupted."
205                 sys.exit(1)
206
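# Editorial sketch (hypothetical helper, never invoked here): typical
# userquery() calls. The prompt strings are arbitrary examples. Without a
# responses argument the function returns "Yes" or "No"; with a custom list it
# returns one of the supplied members.
def _example_userquery():
        if userquery("Would you like to continue?") == "No":
                sys.exit(1)
        choice = userquery("Pick a colour:",
                responses=["Red", "Green", "Blue"],
                colours=[red, green, blue])
        return choice
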
207 actions = frozenset([
208 "clean", "config", "depclean",
209 "info", "list-sets", "metadata",
210 "prune", "regen",  "search",
211 "sync",  "unmerge", "version",
212 ])
213 options=[
214 "--ask",          "--alphabetical",
215 "--buildpkg",     "--buildpkgonly",
216 "--changelog",    "--columns",
217 "--complete-graph",
218 "--debug",        "--deep",
219 "--digest",
220 "--emptytree",
221 "--fetchonly",    "--fetch-all-uri",
222 "--getbinpkg",    "--getbinpkgonly",
223 "--help",         "--ignore-default-opts",
224 "--keep-going",
225 "--noconfmem",
226 "--newuse",
227 "--nodeps",       "--noreplace",
228 "--nospinner",    "--oneshot",
229 "--onlydeps",     "--pretend",
230 "--quiet",        "--resume",
231 "--searchdesc",   "--selective",
232 "--skipfirst",
233 "--tree",
234 "--update",
235 "--usepkg",       "--usepkgonly",
236 "--verbose",
237 ]
238
239 shortmapping={
240 "1":"--oneshot",
241 "a":"--ask",
242 "b":"--buildpkg",  "B":"--buildpkgonly",
243 "c":"--clean",     "C":"--unmerge",
244 "d":"--debug",     "D":"--deep",
245 "e":"--emptytree",
246 "f":"--fetchonly", "F":"--fetch-all-uri",
247 "g":"--getbinpkg", "G":"--getbinpkgonly",
248 "h":"--help",
249 "k":"--usepkg",    "K":"--usepkgonly",
250 "l":"--changelog",
251 "n":"--noreplace", "N":"--newuse",
252 "o":"--onlydeps",  "O":"--nodeps",
253 "p":"--pretend",   "P":"--prune",
254 "q":"--quiet",
255 "s":"--search",    "S":"--searchdesc",
256 "t":"--tree",
257 "u":"--update",
258 "v":"--verbose",   "V":"--version"
259 }
260
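# Editorial sketch (an assumption, not the actual emerge argument handling):
# how a clustered short-option string could be expanded to long options using
# the shortmapping table above. Unknown letters are passed through unchanged so
# the caller can report them.
def _example_expand_short_opts(arg):
        # e.g. "-avD" -> ["--ask", "--verbose", "--deep"]
        expanded = []
        for letter in arg.lstrip("-"):
                expanded.append(shortmapping.get(letter, letter))
        return expanded
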
261 _emerge_log_dir = '/var/log'
262
263 def emergelog(xterm_titles, mystr, short_msg=None):
264         if xterm_titles and short_msg:
265                 if "HOSTNAME" in os.environ:
266                         short_msg = os.environ["HOSTNAME"]+": "+short_msg
267                 xtermTitle(short_msg)
268         try:
269                 file_path = os.path.join(_emerge_log_dir, 'emerge.log')
270                 mylogfile = open(file_path, "a")
271                 portage.util.apply_secpass_permissions(file_path,
272                         uid=portage.portage_uid, gid=portage.portage_gid,
273                         mode=0660)
274                 mylock = None
275                 try:
276                         mylock = portage.locks.lockfile(mylogfile)
277                         # seek because we may have gotten held up by the lock.
278                         # if so, we may not be positioned at the end of the file.
279                         mylogfile.seek(0, 2)
280                         mylogfile.write(str(time.time())[:10]+": "+mystr+"\n")
281                         mylogfile.flush()
282                 finally:
283                         if mylock:
284                                 portage.locks.unlockfile(mylock)
285                         mylogfile.close()
286         except (IOError,OSError,portage.exception.PortageException), e:
287                 if secpass >= 1:
288                         print >> sys.stderr, "emergelog():",e
289
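# Editorial sketch (hypothetical helper, never called here): a typical
# emergelog() call. The message text is an arbitrary example; entries go to
# emerge.log under _emerge_log_dir and errors are reported only when
# secpass >= 1.
def _example_emergelog():
        emergelog(xterm_titles=True,
                mystr=" *** example log entry",
                short_msg="example log entry")
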
290 def countdown(secs=5, doing="Starting"):
291         if secs:
292                 print ">>> Waiting",secs,"seconds before starting..."
293                 print ">>> (Control-C to abort)...\n"+doing+" in: ",
294                 ticks=range(secs)
295                 ticks.reverse()
296                 for sec in ticks:
297                         sys.stdout.write(colorize("UNMERGE_WARN", str(sec+1)+" "))
298                         sys.stdout.flush()
299                         time.sleep(1)
300                 print
301
302 # Format a byte count for display, rounded up to whole kB with comma grouping.
303 def format_size(mysize):
304         if isinstance(mysize, basestring):
305                 return mysize
306         if 0 != mysize % 1024:
307                 # Always round up to the next kB so that it doesn't show 0 kB when
308                 # some small file still needs to be fetched.
309                 mysize += 1024 - mysize % 1024
310         mystr=str(mysize/1024)
311         mycount=len(mystr)
312         while (mycount > 3):
313                 mycount-=3
314                 mystr=mystr[:mycount]+","+mystr[mycount:]
315         return mystr+" kB"
316
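# Editorial worked examples (hypothetical helper, not executed at import time)
# for format_size(): partial kilobytes are rounded up so a small pending fetch
# never shows as "0 kB", and thousands get comma separators.
def _example_format_size():
        assert format_size(0) == "0 kB"
        assert format_size(1) == "1 kB"           # rounded up to the next kB
        assert format_size(1536) == "2 kB"        # 1.5 kB rounds up
        assert format_size(2048000) == "2,000 kB" # comma-grouped thousands
        assert format_size("3 MiB") == "3 MiB"    # strings pass through unchanged
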
317
318 def getgccversion(chost):
319         """
320         @rtype: C{str}
321         @return: the current in-use gcc version
322         """
323
324         gcc_ver_command = 'gcc -dumpversion'
325         gcc_ver_prefix = 'gcc-'
326
327         gcc_not_found_error = red(
328         "!!! No gcc found. You probably need to 'source /etc/profile'\n" +
329         "!!! to update the environment of this terminal and possibly\n" +
330         "!!! other terminals also.\n"
331         )
332
333         mystatus, myoutput = commands.getstatusoutput("gcc-config -c")
334         if mystatus == os.EX_OK and myoutput.startswith(chost + "-"):
335                 return myoutput.replace(chost + "-", gcc_ver_prefix, 1)
336
337         mystatus, myoutput = commands.getstatusoutput(
338                 chost + "-" + gcc_ver_command)
339         if mystatus == os.EX_OK:
340                 return gcc_ver_prefix + myoutput
341
342         mystatus, myoutput = commands.getstatusoutput(gcc_ver_command)
343         if mystatus == os.EX_OK:
344                 return gcc_ver_prefix + myoutput
345
346         portage.writemsg(gcc_not_found_error, noiselevel=-1)
347         return "[unavailable]"
348
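# Editorial sketch (hypothetical helper, never run at import time):
# getgccversion() only needs a CHOST string; real callers pass
# settings["CHOST"]. The CHOST value below is an arbitrary example.
def _example_getgccversion():
        # Returns e.g. "gcc-4.3.2", or "[unavailable]" if no gcc is found.
        return getgccversion("x86_64-pc-linux-gnu")
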
349 def getportageversion(portdir, target_root, profile, chost, vardb):
350         profilever = "unavailable"
351         if profile:
352                 realpath = os.path.realpath(profile)
353                 basepath   = os.path.realpath(os.path.join(portdir, "profiles"))
354                 if realpath.startswith(basepath):
355                         profilever = realpath[1 + len(basepath):]
356                 else:
357                         try:
358                                 profilever = "!" + os.readlink(profile)
359                         except (OSError):
360                                 pass
361                 del realpath, basepath
362
363         libcver=[]
364         libclist  = vardb.match("virtual/libc")
365         libclist += vardb.match("virtual/glibc")
366         libclist  = portage.util.unique_array(libclist)
367         for x in libclist:
368                 xs=portage.catpkgsplit(x)
369                 if libcver:
370                         libcver+=","+"-".join(xs[1:])
371                 else:
372                         libcver="-".join(xs[1:])
373         if libcver==[]:
374                 libcver="unavailable"
375
376         gccver = getgccversion(chost)
377         unameout=platform.release()+" "+platform.machine()
378
379         return "Portage " + portage.VERSION +" ("+profilever+", "+gccver+", "+libcver+", "+unameout+")"
380
381 def create_depgraph_params(myopts, myaction):
382         # Configure emerge engine parameters.
383         #
384         # self:      include _this_ package regardless of whether it is merged.
385         # selective: exclude the package if it is merged
386         # recurse:   go into the dependencies
387         # deep:      go into the dependencies of already merged packages
388         # empty:     pretend nothing is merged
389         # complete:  completely account for all known dependencies
390         # remove:    build graph for use in removing packages
391         myparams = set(["recurse"])
392
393         if myaction == "remove":
394                 myparams.add("remove")
395                 myparams.add("complete")
396                 return myparams
397
398         if "--update" in myopts or \
399                 "--newuse" in myopts or \
400                 "--reinstall" in myopts or \
401                 "--noreplace" in myopts:
402                 myparams.add("selective")
403         if "--emptytree" in myopts:
404                 myparams.add("empty")
405                 myparams.discard("selective")
406         if "--nodeps" in myopts:
407                 myparams.discard("recurse")
408         if "--deep" in myopts:
409                 myparams.add("deep")
410         if "--complete-graph" in myopts:
411                 myparams.add("complete")
412         return myparams
413
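# Editorial worked examples (hypothetical helper, not executed at import time)
# of the parameter sets produced by create_depgraph_params(). The option dicts
# are arbitrary samples of parsed emerge options.
def _example_create_depgraph_params():
        # A plain --update run adds "selective" on top of the default "recurse".
        assert create_depgraph_params({"--update": True}, "") == \
                set(["recurse", "selective"])
        # --emptytree discards "selective" again and adds "empty".
        assert create_depgraph_params(
                {"--update": True, "--emptytree": True}, "") == \
                set(["recurse", "empty"])
        # The "remove" action short-circuits to a removal graph.
        assert create_depgraph_params({}, "remove") == \
                set(["recurse", "remove", "complete"])
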
414 # search functionality
415 class search(object):
416
417         #
418         # class constants
419         #
420         VERSION_SHORT=1
421         VERSION_RELEASE=2
422
423         #
424         # public interface
425         #
426         def __init__(self, root_config, spinner, searchdesc,
427                 verbose, usepkg, usepkgonly):
428                 """Searches the available and installed packages for the supplied search key.
429                 The list of available and installed packages is created at object instantiation.
430                 This makes successive searches faster."""
431                 self.settings = root_config.settings
432                 self.vartree = root_config.trees["vartree"]
433                 self.spinner = spinner
434                 self.verbose = verbose
435                 self.searchdesc = searchdesc
436                 self.root_config = root_config
437                 self.setconfig = root_config.setconfig
438                 self.matches = {"pkg" : []}
439                 self.mlen = 0
440
441                 def fake_portdb():
442                         pass
443                 self.portdb = fake_portdb
444                 for attrib in ("aux_get", "cp_all",
445                         "xmatch", "findname", "getFetchMap"):
446                         setattr(fake_portdb, attrib, getattr(self, "_"+attrib))
447
448                 self._dbs = []
449
450                 portdb = root_config.trees["porttree"].dbapi
451                 bindb = root_config.trees["bintree"].dbapi
452                 vardb = root_config.trees["vartree"].dbapi
453
454                 if not usepkgonly and portdb._have_root_eclass_dir:
455                         self._dbs.append(portdb)
456
457                 if (usepkg or usepkgonly) and bindb.cp_all():
458                         self._dbs.append(bindb)
459
460                 self._dbs.append(vardb)
461                 self._portdb = portdb
462
463         def _cp_all(self):
464                 cp_all = set()
465                 for db in self._dbs:
466                         cp_all.update(db.cp_all())
467                 return list(sorted(cp_all))
468
469         def _aux_get(self, *args, **kwargs):
470                 for db in self._dbs:
471                         try:
472                                 return db.aux_get(*args, **kwargs)
473                         except KeyError:
474                                 pass
475                 raise
476
477         def _findname(self, *args, **kwargs):
478                 for db in self._dbs:
479                         if db is not self._portdb:
480                                 # We don't want findname to return anything
481                                 # unless it's an ebuild in a portage tree.
482                                 # Otherwise, it's already built and we don't
483                                 # care about it.
484                                 continue
485                         func = getattr(db, "findname", None)
486                         if func:
487                                 value = func(*args, **kwargs)
488                                 if value:
489                                         return value
490                 return None
491
492         def _getFetchMap(self, *args, **kwargs):
493                 for db in self._dbs:
494                         func = getattr(db, "getFetchMap", None)
495                         if func:
496                                 value = func(*args, **kwargs)
497                                 if value:
498                                         return value
499                 return {}
500
501         def _visible(self, db, cpv, metadata):
502                 installed = db is self.vartree.dbapi
503                 built = installed or db is not self._portdb
504                 pkg_type = "ebuild"
505                 if installed:
506                         pkg_type = "installed"
507                 elif built:
508                         pkg_type = "binary"
509                 return visible(self.settings,
510                         Package(type_name=pkg_type, root_config=self.root_config,
511                         cpv=cpv, built=built, installed=installed, metadata=metadata))
512
513         def _xmatch(self, level, atom):
514                 """
515                 This method does not expand old-style virtuals because it
516                 is restricted to returning matches for a single ${CATEGORY}/${PN}
517                 and old-style virtual matches are unreliable for that when querying
518                 multiple package databases. If necessary, old-style virtual
519                 expansion can be performed on atoms prior to calling this method.
520                 """
521                 cp = portage.dep_getkey(atom)
522                 if level == "match-all":
523                         matches = set()
524                         for db in self._dbs:
525                                 if hasattr(db, "xmatch"):
526                                         matches.update(db.xmatch(level, atom))
527                                 else:
528                                         matches.update(db.match(atom))
529                         result = list(x for x in matches if portage.cpv_getkey(x) == cp)
530                         db._cpv_sort_ascending(result)
531                 elif level == "match-visible":
532                         matches = set()
533                         for db in self._dbs:
534                                 if hasattr(db, "xmatch"):
535                                         matches.update(db.xmatch(level, atom))
536                                 else:
537                                         db_keys = list(db._aux_cache_keys)
538                                         for cpv in db.match(atom):
539                                                 metadata = izip(db_keys,
540                                                         db.aux_get(cpv, db_keys))
541                                                 if not self._visible(db, cpv, metadata):
542                                                         continue
543                                                 matches.add(cpv)
544                         result = list(x for x in matches if portage.cpv_getkey(x) == cp)
545                         db._cpv_sort_ascending(result)
546                 elif level == "bestmatch-visible":
547                         result = None
548                         for db in self._dbs:
549                                 if hasattr(db, "xmatch"):
550                                         cpv = db.xmatch("bestmatch-visible", atom)
551                                         if not cpv or portage.cpv_getkey(cpv) != cp:
552                                                 continue
553                                         if not result or cpv == portage.best([cpv, result]):
554                                                 result = cpv
555                                 else:
556                                         db_keys = Package.metadata_keys
557                                         # break out of this loop with highest visible
558                                         # match, checked in descending order
559                                         for cpv in reversed(db.match(atom)):
560                                                 if portage.cpv_getkey(cpv) != cp:
561                                                         continue
562                                                 metadata = izip(db_keys,
563                                                         db.aux_get(cpv, db_keys))
564                                                 if not self._visible(db, cpv, metadata):
565                                                         continue
566                                                 if not result or cpv == portage.best([cpv, result]):
567                                                         result = cpv
568                                                 break
569                 else:
570                         raise NotImplementedError(level)
571                 return result
572
573         def execute(self,searchkey):
574                 """Performs the search for the supplied search key"""
575                 match_category = 0
576                 self.searchkey=searchkey
577                 self.packagematches = []
578                 if self.searchdesc:
579                         self.searchdesc=1
580                         self.matches = {"pkg":[], "desc":[], "set":[]}
581                 else:
582                         self.searchdesc=0
583                         self.matches = {"pkg":[], "set":[]}
584                 print "Searching...   ",
585
586                 regexsearch = False
587                 if self.searchkey.startswith('%'):
588                         regexsearch = True
589                         self.searchkey = self.searchkey[1:]
590                 if self.searchkey.startswith('@'):
591                         match_category = 1
592                         self.searchkey = self.searchkey[1:]
593                 if regexsearch:
594                         self.searchre=re.compile(self.searchkey,re.I)
595                 else:
596                         self.searchre=re.compile(re.escape(self.searchkey), re.I)
597                 for package in self.portdb.cp_all():
598                         self.spinner.update()
599
600                         if match_category:
601                                 match_string  = package[:]
602                         else:
603                                 match_string  = package.split("/")[-1]
604
605                         masked=0
606                         if self.searchre.search(match_string):
607                                 if not self.portdb.xmatch("match-visible", package):
608                                         masked=1
609                                 self.matches["pkg"].append([package,masked])
610                         elif self.searchdesc: # DESCRIPTION searching
611                                 full_package = self.portdb.xmatch("bestmatch-visible", package)
612                                 if not full_package:
613                                         # No match found; we don't want to query the description.
614                                         full_package = portage.best(
615                                                 self.portdb.xmatch("match-all", package))
616                                         if not full_package:
617                                                 continue
618                                         else:
619                                                 masked=1
620                                 try:
621                                         full_desc = self.portdb.aux_get(
622                                                 full_package, ["DESCRIPTION"])[0]
623                                 except KeyError:
624                                         print "emerge: search: aux_get() failed, skipping"
625                                         continue
626                                 if self.searchre.search(full_desc):
627                                         self.matches["desc"].append([full_package,masked])
628
629                 self.sdict = self.setconfig.getSets()
630                 for setname in self.sdict:
631                         self.spinner.update()
632                         if match_category:
633                                 match_string = setname
634                         else:
635                                 match_string = setname.split("/")[-1]
636                         
637                         if self.searchre.search(match_string):
638                                 self.matches["set"].append([setname, False])
639                         elif self.searchdesc:
640                                 if self.searchre.search(
641                                         self.sdict[setname].getMetadata("DESCRIPTION")):
642                                         self.matches["set"].append([setname, False])
643                         
644                 self.mlen=0
645                 for mtype in self.matches:
646                         self.matches[mtype].sort()
647                         self.mlen += len(self.matches[mtype])
648
649         def addCP(self, cp):
650                 if not self.portdb.xmatch("match-all", cp):
651                         return
652                 masked = 0
653                 if not self.portdb.xmatch("bestmatch-visible", cp):
654                         masked = 1
655                 self.matches["pkg"].append([cp, masked])
656                 self.mlen += 1
657
658         def output(self):
659                 """Outputs the results of the search."""
660                 print "\b\b  \n[ Results for search key : "+white(self.searchkey)+" ]"
661                 print "[ Applications found : "+white(str(self.mlen))+" ]"
662                 print " "
663                 vardb = self.vartree.dbapi
664                 for mtype in self.matches:
665                         for match,masked in self.matches[mtype]:
666                                 full_package = None
667                                 if mtype == "pkg":
668                                         catpack = match
669                                         full_package = self.portdb.xmatch(
670                                                 "bestmatch-visible", match)
671                                         if not full_package:
672                                                 # No match found; we don't want to query the description.
673                                                 masked=1
674                                                 full_package = portage.best(
675                                                         self.portdb.xmatch("match-all",match))
676                                 elif mtype == "desc":
677                                         full_package = match
678                                         match        = portage.cpv_getkey(match)
679                                 elif mtype == "set":
680                                         print green("*")+"  "+white(match)
681                                         print "     ", darkgreen("Description:")+"  ", self.sdict[match].getMetadata("DESCRIPTION")
682                                         print
683                                 if full_package:
684                                         try:
685                                                 desc, homepage, license = self.portdb.aux_get(
686                                                         full_package, ["DESCRIPTION","HOMEPAGE","LICENSE"])
687                                         except KeyError:
688                                                 print "emerge: search: aux_get() failed, skipping"
689                                                 continue
690                                         if masked:
691                                                 print green("*")+"  "+white(match)+" "+red("[ Masked ]")
692                                         else:
693                                                 print green("*")+"  "+white(match)
694                                         myversion = self.getVersion(full_package, search.VERSION_RELEASE)
695
696                                         mysum = [0,0]
697                                         file_size_str = None
698                                         mycat = match.split("/")[0]
699                                         mypkg = match.split("/")[1]
700                                         mycpv = match + "-" + myversion
701                                         myebuild = self.portdb.findname(mycpv)
702                                         if myebuild:
703                                                 pkgdir = os.path.dirname(myebuild)
704                                                 from portage import manifest
705                                                 mf = manifest.Manifest(
706                                                         pkgdir, self.settings["DISTDIR"])
707                                                 try:
708                                                         uri_map = self.portdb.getFetchMap(mycpv)
709                                                 except portage.exception.InvalidDependString, e:
710                                                         file_size_str = "Unknown (%s)" % (e,)
711                                                         del e
712                                                 else:
713                                                         try:
714                                                                 mysum[0] = mf.getDistfilesSize(uri_map)
715                                                         except KeyError, e:
716                                                                 file_size_str = "Unknown (missing " + \
717                                                                         "digest for %s)" % (e,)
718                                                                 del e
719
720                                         available = False
721                                         for db in self._dbs:
722                                                 if db is not vardb and \
723                                                         db.cpv_exists(mycpv):
724                                                         available = True
725                                                         if not myebuild and hasattr(db, "bintree"):
726                                                                 myebuild = db.bintree.getname(mycpv)
727                                                                 try:
728                                                                         mysum[0] = os.stat(myebuild).st_size
729                                                                 except OSError:
730                                                                         myebuild = None
731                                                         break
732
733                                         if myebuild and file_size_str is None:
734                                                 mystr = str(mysum[0] / 1024)
735                                                 mycount = len(mystr)
736                                                 while (mycount > 3):
737                                                         mycount -= 3
738                                                         mystr = mystr[:mycount] + "," + mystr[mycount:]
739                                                 file_size_str = mystr + " kB"
740
741                                         if self.verbose:
742                                                 if available:
743                                                         print "     ", darkgreen("Latest version available:"),myversion
744                                                 print "     ", self.getInstallationStatus(mycat+'/'+mypkg)
745                                                 if myebuild:
746                                                         print "      %s %s" % \
747                                                                 (darkgreen("Size of files:"), file_size_str)
748                                                 print "     ", darkgreen("Homepage:")+"     ",homepage
749                                                 print "     ", darkgreen("Description:")+"  ",desc
750                                                 print "     ", darkgreen("License:")+"      ",license
751                                                 print
752         #
753         # private interface
754         #
755         def getInstallationStatus(self,package):
756                 installed_package = self.vartree.dep_bestmatch(package)
757                 result = ""
758                 version = self.getVersion(installed_package,search.VERSION_RELEASE)
759                 if len(version) > 0:
760                         result = darkgreen("Latest version installed:")+" "+version
761                 else:
762                         result = darkgreen("Latest version installed:")+" [ Not Installed ]"
763                 return result
764
765         def getVersion(self,full_package,detail):
766                 if len(full_package) > 1:
767                         package_parts = portage.catpkgsplit(full_package)
768                         if detail == search.VERSION_RELEASE and package_parts[3] != 'r0':
769                                 result = package_parts[2]+ "-" + package_parts[3]
770                         else:
771                                 result = package_parts[2]
772                 else:
773                         result = ""
774                 return result
775
776 class RootConfig(object):
777         """This is used internally by depgraph to track information about a
778         particular $ROOT."""
779
780         pkg_tree_map = {
781                 "ebuild"    : "porttree",
782                 "binary"    : "bintree",
783                 "installed" : "vartree"
784         }
785
786         tree_pkg_map = {}
787         for k, v in pkg_tree_map.iteritems():
788                 tree_pkg_map[v] = k
789
790         def __init__(self, settings, trees, setconfig):
791                 self.trees = trees
792                 self.settings = settings
793                 self.iuse_implicit = tuple(sorted(settings._get_implicit_iuse()))
794                 self.root = self.settings["ROOT"]
795                 self.setconfig = setconfig
796                 if setconfig is None:
797                         self.sets = {}
798                 else:
799                         self.sets = self.setconfig.getSets()
800                 self.visible_pkgs = PackageVirtualDbapi(self.settings)
801
802 def create_world_atom(pkg, args_set, root_config):
803         """Create a new atom for the world file if one does not exist.  If the
804         argument atom is precise enough to identify a specific slot then a slot
805         atom will be returned. Atoms that are in the system set may also be stored
806         in world since system atoms can only match one slot while world atoms can
807         be greedy with respect to slots.  Unslotted system packages will not be
808         stored in world."""
809
810         arg_atom = args_set.findAtomForPackage(pkg)
811         if not arg_atom:
812                 return None
813         cp = portage.dep_getkey(arg_atom)
814         new_world_atom = cp
815         sets = root_config.sets
816         portdb = root_config.trees["porttree"].dbapi
817         vardb = root_config.trees["vartree"].dbapi
818         available_slots = set(portdb.aux_get(cpv, ["SLOT"])[0] \
819                 for cpv in portdb.match(cp))
820         slotted = len(available_slots) > 1 or \
821                 (len(available_slots) == 1 and "0" not in available_slots)
822         if not slotted:
823                 # check the vdb in case this is multislot
824                 available_slots = set(vardb.aux_get(cpv, ["SLOT"])[0] \
825                         for cpv in vardb.match(cp))
826                 slotted = len(available_slots) > 1 or \
827                         (len(available_slots) == 1 and "0" not in available_slots)
828         if slotted and arg_atom != cp:
829                 # If the user gave a specific atom, store it as a
830                 # slot atom in the world file.
831                 slot_atom = pkg.slot_atom
832
833                 # For USE=multislot, there are a couple of cases to
834                 # handle here:
835                 #
836                 # 1) SLOT="0", but the real SLOT spontaneously changed to some
837                 #    unknown value, so just record an unslotted atom.
838                 #
839                 # 2) SLOT comes from an installed package and there is no
840                 #    matching SLOT in the portage tree.
841                 #
842                 # Make sure that the slot atom is available in either the
843                 # portdb or the vardb, since otherwise the user certainly
844                 # doesn't want the SLOT atom recorded in the world file
845                 # (case 1 above).  If it's only available in the vardb,
846                 # the user may be trying to prevent a USE=multislot
847                 # package from being removed by --depclean (case 2 above).
848
849                 mydb = portdb
850                 if not portdb.match(slot_atom):
851                         # SLOT seems to come from an installed multislot package
852                         mydb = vardb
853                 # If there is no installed package matching the SLOT atom,
854                 # it probably changed SLOT spontaneously due to USE=multislot,
855                 # so just record an unslotted atom.
856                 if vardb.match(slot_atom):
857                         # Now verify that the argument is precise
858                         # enough to identify a specific slot.
859                         matches = mydb.match(arg_atom)
860                         matched_slots = set()
861                         for cpv in matches:
862                                 matched_slots.add(mydb.aux_get(cpv, ["SLOT"])[0])
863                         if len(matched_slots) == 1:
864                                 new_world_atom = slot_atom
865
866         if new_world_atom == sets["world"].findAtomForPackage(pkg):
867                 # Both atoms would be identical, so there's nothing to add.
868                 return None
869         if not slotted:
870                 # Unlike world atoms, system atoms are not greedy for slots, so they
871                 # can't be safely excluded from world if they are slotted.
872                 system_atom = sets["system"].findAtomForPackage(pkg)
873                 if system_atom:
874                         if not portage.dep_getkey(system_atom).startswith("virtual/"):
875                                 return None
876                         # System virtuals aren't safe to exclude from world since they can
877                         # match multiple old-style virtuals but only one of them will be
878                         # pulled in by update or depclean.
879                         providers = portdb.mysettings.getvirtuals().get(
880                                 portage.dep_getkey(system_atom))
881                         if providers and len(providers) == 1 and providers[0] == cp:
882                                 return None
883         return new_world_atom
884
885 def filter_iuse_defaults(iuse):
886         for flag in iuse:
887                 if flag.startswith("+") or flag.startswith("-"):
888                         yield flag[1:]
889                 else:
890                         yield flag
891
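# Editorial worked example (hypothetical helper, not executed at import time):
# filter_iuse_defaults() strips leading "+"/"-" default markers from IUSE
# flags while leaving plain flags untouched.
def _example_filter_iuse_defaults():
        assert list(filter_iuse_defaults(["+berkdb", "-doc", "ipv6"])) == \
                ["berkdb", "doc", "ipv6"]
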
892 def _find_deep_system_runtime_deps(graph):
893         deep_system_deps = set()
894         node_stack = []
895         for node in graph:
896                 if not isinstance(node, Package) or \
897                         node.operation == 'uninstall':
898                         continue
899                 if node.root_config.sets['system'].findAtomForPackage(node):
900                         node_stack.append(node)
901
902         def ignore_priority(priority):
903                 """
904                 Ignore non-runtime priorities.
905                 """
906                 if isinstance(priority, DepPriority) and \
907                         (priority.runtime or priority.runtime_post):
908                         return False
909                 return True
910
911         while node_stack:
912                 node = node_stack.pop()
913                 if node in deep_system_deps:
914                         continue
915                 deep_system_deps.add(node)
916                 for child in graph.child_nodes(node, ignore_priority=ignore_priority):
917                         if not isinstance(child, Package) or \
918                                 child.operation == 'uninstall':
919                                 continue
920                         node_stack.append(child)
921
922         return deep_system_deps
923
924 class FakeVartree(portage.vartree):
925         """This implements an in-memory copy of a vartree instance that provides
926         all the interfaces required for use by the depgraph.  The vardb is locked
927         during the constructor call just long enough to read a copy of the
928         installed package information.  This allows the depgraph to do its
929         dependency calculations without holding a lock on the vardb.  It also
930         allows things like vardb global updates to be done in memory so that the
931         user doesn't necessarily need write access to the vardb in cases where
932         global updates are necessary (updates are performed when necessary if there
933         is not a matching ebuild in the tree)."""
934         def __init__(self, root_config, pkg_cache=None, acquire_lock=1):
935                 self._root_config = root_config
936                 if pkg_cache is None:
937                         pkg_cache = {}
938                 real_vartree = root_config.trees["vartree"]
939                 portdb = root_config.trees["porttree"].dbapi
940                 self.root = real_vartree.root
941                 self.settings = real_vartree.settings
942                 mykeys = list(real_vartree.dbapi._aux_cache_keys)
943                 if "_mtime_" not in mykeys:
944                         mykeys.append("_mtime_")
945                 self._db_keys = mykeys
946                 self._pkg_cache = pkg_cache
947                 self.dbapi = PackageVirtualDbapi(real_vartree.settings)
948                 vdb_path = os.path.join(self.root, portage.VDB_PATH)
949                 try:
950                         # At least the parent needs to exist for the lock file.
951                         portage.util.ensure_dirs(vdb_path)
952                 except portage.exception.PortageException:
953                         pass
954                 vdb_lock = None
955                 try:
956                         if acquire_lock and os.access(vdb_path, os.W_OK):
957                                 vdb_lock = portage.locks.lockdir(vdb_path)
958                         real_dbapi = real_vartree.dbapi
959                         slot_counters = {}
960                         for cpv in real_dbapi.cpv_all():
961                                 cache_key = ("installed", self.root, cpv, "nomerge")
962                                 pkg = self._pkg_cache.get(cache_key)
963                                 if pkg is not None:
964                                         metadata = pkg.metadata
965                                 else:
966                                         metadata = dict(izip(mykeys, real_dbapi.aux_get(cpv, mykeys)))
967                                 myslot = metadata["SLOT"]
968                                 mycp = portage.dep_getkey(cpv)
969                                 myslot_atom = "%s:%s" % (mycp, myslot)
970                                 try:
971                                         mycounter = long(metadata["COUNTER"])
972                                 except ValueError:
973                                         mycounter = 0
974                                         metadata["COUNTER"] = str(mycounter)
975                                 other_counter = slot_counters.get(myslot_atom, None)
976                                 if other_counter is not None:
977                                         if other_counter > mycounter:
978                                                 continue
979                                 slot_counters[myslot_atom] = mycounter
980                                 if pkg is None:
981                                         pkg = Package(built=True, cpv=cpv,
982                                                 installed=True, metadata=metadata,
983                                                 root_config=root_config, type_name="installed")
984                                 self._pkg_cache[pkg] = pkg
985                                 self.dbapi.cpv_inject(pkg)
986                         real_dbapi.flush_cache()
987                 finally:
988                         if vdb_lock:
989                                 portage.locks.unlockdir(vdb_lock)
990                 # Populate the old-style virtuals using the cached values.
991                 if not self.settings.treeVirtuals:
992                         self.settings.treeVirtuals = portage.util.map_dictlist_vals(
993                                 portage.getCPFromCPV, self.get_all_provides())
994
995                 # Initialize variables needed for lazy cache pulls of the live ebuild
996                 # metadata.  This ensures that the vardb lock is released ASAP, without
997                 # being delayed in case cache generation is triggered.
998                 self._aux_get = self.dbapi.aux_get
999                 self.dbapi.aux_get = self._aux_get_wrapper
1000                 self._match = self.dbapi.match
1001                 self.dbapi.match = self._match_wrapper
1002                 self._aux_get_history = set()
1003                 self._portdb_keys = ["EAPI", "DEPEND", "RDEPEND", "PDEPEND"]
1004                 self._portdb = portdb
1005                 self._global_updates = None
1006
1007         def _match_wrapper(self, cpv, use_cache=1):
1008                 """
1009                 Make sure the metadata in Package instances gets updated for any
1010                 cpv that is returned from a match() call, since the metadata can
1011                 be accessed directly from the Package instance instead of via
1012                 aux_get().
1013                 """
1014                 matches = self._match(cpv, use_cache=use_cache)
1015                 for cpv in matches:
1016                         if cpv in self._aux_get_history:
1017                                 continue
1018                         self._aux_get_wrapper(cpv, [])
1019                 return matches
1020
1021         def _aux_get_wrapper(self, pkg, wants):
1022                 if pkg in self._aux_get_history:
1023                         return self._aux_get(pkg, wants)
1024                 self._aux_get_history.add(pkg)
1025                 try:
1026                         # Use the live ebuild metadata if possible.
1027                         live_metadata = dict(izip(self._portdb_keys,
1028                                 self._portdb.aux_get(pkg, self._portdb_keys)))
1029                         if not portage.eapi_is_supported(live_metadata["EAPI"]):
1030                                 raise KeyError(pkg)
1031                         self.dbapi.aux_update(pkg, live_metadata)
1032                 except (KeyError, portage.exception.PortageException):
1033                         if self._global_updates is None:
1034                                 self._global_updates = \
1035                                         grab_global_updates(self._portdb.porttree_root)
1036                         perform_global_updates(
1037                                 pkg, self.dbapi, self._global_updates)
1038                 return self._aux_get(pkg, wants)
1039
1040         def sync(self, acquire_lock=1):
1041                 """
1042                 Call this method to synchronize state with the real vardb
1043                 after one or more packages may have been installed or
1044                 uninstalled.
1045                 """
1046                 vdb_path = os.path.join(self.root, portage.VDB_PATH)
1047                 try:
1048                         # At least the parent needs to exist for the lock file.
1049                         portage.util.ensure_dirs(vdb_path)
1050                 except portage.exception.PortageException:
1051                         pass
1052                 vdb_lock = None
1053                 try:
1054                         if acquire_lock and os.access(vdb_path, os.W_OK):
1055                                 vdb_lock = portage.locks.lockdir(vdb_path)
1056                         self._sync()
1057                 finally:
1058                         if vdb_lock:
1059                                 portage.locks.unlockdir(vdb_lock)
1060
1061         def _sync(self):
1062
1063                 real_vardb = self._root_config.trees["vartree"].dbapi
1064                 current_cpv_set = frozenset(real_vardb.cpv_all())
1065                 pkg_vardb = self.dbapi
1066                 aux_get_history = self._aux_get_history
1067
1068                 # Remove any packages that have been uninstalled.
1069                 for pkg in list(pkg_vardb):
1070                         if pkg.cpv not in current_cpv_set:
1071                                 pkg_vardb.cpv_remove(pkg)
1072                                 aux_get_history.discard(pkg.cpv)
1073
1074                 # Validate counters and timestamps.
1075                 slot_counters = {}
1076                 root = self.root
1077                 validation_keys = ["COUNTER", "_mtime_"]
1078                 for cpv in current_cpv_set:
1079
1080                         pkg_hash_key = ("installed", root, cpv, "nomerge")
1081                         pkg = pkg_vardb.get(pkg_hash_key)
1082                         if pkg is not None:
1083                                 counter, mtime = real_vardb.aux_get(cpv, validation_keys)
1084                                 try:
1085                                         counter = long(counter)
1086                                 except ValueError:
1087                                         counter = 0
1088
1089                                 if counter != pkg.counter or \
1090                                         mtime != pkg.mtime:
1091                                         pkg_vardb.cpv_remove(pkg)
1092                                         aux_get_history.discard(pkg.cpv)
1093                                         pkg = None
1094
1095                         if pkg is None:
1096                                 pkg = self._pkg(cpv)
1097
1098                         other_counter = slot_counters.get(pkg.slot_atom)
1099                         if other_counter is not None:
1100                                 if other_counter > pkg.counter:
1101                                         continue
1102
1103                         slot_counters[pkg.slot_atom] = pkg.counter
1104                         pkg_vardb.cpv_inject(pkg)
1105
1106                 real_vardb.flush_cache()
1107
1108         def _pkg(self, cpv):
1109                 root_config = self._root_config
1110                 real_vardb = root_config.trees["vartree"].dbapi
1111                 pkg = Package(cpv=cpv, installed=True,
1112                         metadata=izip(self._db_keys,
1113                         real_vardb.aux_get(cpv, self._db_keys)),
1114                         root_config=root_config,
1115                         type_name="installed")
1116
1117                 try:
1118                         mycounter = long(pkg.metadata["COUNTER"])
1119                 except ValueError:
1120                         mycounter = 0
1121                         pkg.metadata["COUNTER"] = str(mycounter)
1122
1123                 return pkg
1124
1125 def grab_global_updates(portdir):
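             """
             Collect package update commands (e.g. 'move' and 'slotmove'
             entries) parsed from the profiles/updates directory of the given
             PORTDIR. A missing updates directory yields an empty list.
             """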
1126         from portage.update import grab_updates, parse_updates
1127         updpath = os.path.join(portdir, "profiles", "updates")
1128         try:
1129                 rawupdates = grab_updates(updpath)
1130         except portage.exception.DirectoryNotFound:
1131                 rawupdates = []
1132         upd_commands = []
1133         for mykey, mystat, mycontent in rawupdates:
1134                 commands, errors = parse_updates(mycontent)
1135                 upd_commands.extend(commands)
1136         return upd_commands
1137
1138 def perform_global_updates(mycpv, mydb, mycommands):
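             """
             Apply the given update commands to the *DEPEND metadata of mycpv
             in mydb, writing back any changed values via aux_update().
             """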
1139         from portage.update import update_dbentries
1140         aux_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
1141         aux_dict = dict(izip(aux_keys, mydb.aux_get(mycpv, aux_keys)))
1142         updates = update_dbentries(mycommands, aux_dict)
1143         if updates:
1144                 mydb.aux_update(mycpv, updates)
1145
1146 def visible(pkgsettings, pkg):
1147         """
1148         Check if a package is visible; a usage sketch follows this function.
1149         An invalid LICENSE (InvalidDependString) makes the package not visible.
1150         TODO: optionally generate a list of masking reasons
1151         @rtype: Boolean
1152         @returns: True if the package is visible, False otherwise.
1153         """
1154         if not pkg.metadata["SLOT"]:
1155                 return False
1156         if not pkg.installed:
1157                 if not pkgsettings._accept_chost(pkg.cpv, pkg.metadata):
1158                         return False
1159         eapi = pkg.metadata["EAPI"]
1160         if not portage.eapi_is_supported(eapi):
1161                 return False
1162         if not pkg.installed:
1163                 if portage._eapi_is_deprecated(eapi):
1164                         return False
1165                 if pkgsettings._getMissingKeywords(pkg.cpv, pkg.metadata):
1166                         return False
1167         if pkgsettings._getMaskAtom(pkg.cpv, pkg.metadata):
1168                 return False
1169         if pkgsettings._getProfileMaskAtom(pkg.cpv, pkg.metadata):
1170                 return False
1171         try:
1172                 if pkgsettings._getMissingLicenses(pkg.cpv, pkg.metadata):
1173                         return False
1174         except portage.exception.InvalidDependString:
1175                 return False
1176         return True
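     # Illustrative usage sketch (not part of the original code); 'pkgsettings'
     # and 'root_config' stand for the portage.config and RootConfig instances
     # that callers already have at hand:
     #
     #     if visible(pkgsettings, pkg):
     #         pass # pkg may be offered as an installation candidate
     #     else:
     #         mreasons = get_masking_status(pkg, pkgsettings, root_config)
     #         show_masked_packages([(root_config, pkgsettings,
     #                 pkg.cpv, pkg.metadata, mreasons)])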
1177
1178 def get_masking_status(pkg, pkgsettings, root_config):
1179
1180         mreasons = portage.getmaskingstatus(
1181                 pkg, settings=pkgsettings,
1182                 portdb=root_config.trees["porttree"].dbapi)
1183
1184         if not pkg.installed:
1185                 if not pkgsettings._accept_chost(pkg.cpv, pkg.metadata):
1186                         mreasons.append("CHOST: %s" % \
1187                                 pkg.metadata["CHOST"])
1188
1189         if not pkg.metadata["SLOT"]:
1190                 mreasons.append("invalid: SLOT is undefined")
1191
1192         return mreasons
1193
1194 def get_mask_info(root_config, cpv, pkgsettings,
1195         db, pkg_type, built, installed, db_keys):
1196         eapi_masked = False
1197         try:
1198                 metadata = dict(izip(db_keys,
1199                         db.aux_get(cpv, db_keys)))
1200         except KeyError:
1201                 metadata = None
1202         if metadata and not built:
1203                 pkgsettings.setcpv(cpv, mydb=metadata)
1204                 metadata["USE"] = pkgsettings["PORTAGE_USE"]
1205                 metadata['CHOST'] = pkgsettings.get('CHOST', '')
1206         if metadata is None:
1207                 mreasons = ["corruption"]
1208         else:
1209                 eapi = metadata['EAPI']
1210                 if eapi[:1] == '-':
1211                         eapi = eapi[1:]
1212                 if not portage.eapi_is_supported(eapi):
1213                         mreasons = ['EAPI %s' % eapi]
1214                 else:
1215                         pkg = Package(type_name=pkg_type, root_config=root_config,
1216                                 cpv=cpv, built=built, installed=installed, metadata=metadata)
1217                         mreasons = get_masking_status(pkg, pkgsettings, root_config)
1218         return metadata, mreasons
1219
1220 def show_masked_packages(masked_packages):
1221         shown_licenses = set()
1222         shown_comments = set()
1223         # Maybe there is both an ebuild and a binary. Only
1224         # show one of them to avoid redundant appearance.
1225         shown_cpvs = set()
1226         have_eapi_mask = False
1227         for (root_config, pkgsettings, cpv,
1228                 metadata, mreasons) in masked_packages:
1229                 if cpv in shown_cpvs:
1230                         continue
1231                 shown_cpvs.add(cpv)
1232                 comment, filename = None, None
1233                 if "package.mask" in mreasons:
1234                         comment, filename = \
1235                                 portage.getmaskingreason(
1236                                 cpv, metadata=metadata,
1237                                 settings=pkgsettings,
1238                                 portdb=root_config.trees["porttree"].dbapi,
1239                                 return_location=True)
1240                 missing_licenses = []
1241                 if metadata:
1242                         if not portage.eapi_is_supported(metadata["EAPI"]):
1243                                 have_eapi_mask = True
1244                         try:
1245                                 missing_licenses = \
1246                                         pkgsettings._getMissingLicenses(
1247                                                 cpv, metadata)
1248                         except portage.exception.InvalidDependString:
1249                                 # This will have already been reported
1250                                 # above via mreasons.
1251                                 pass
1252
1253                 print "- "+cpv+" (masked by: "+", ".join(mreasons)+")"
1254                 if comment and comment not in shown_comments:
1255                         print filename+":"
1256                         print comment
1257                         shown_comments.add(comment)
1258                 portdb = root_config.trees["porttree"].dbapi
1259                 for l in missing_licenses:
1260                         l_path = portdb.findLicensePath(l)
1261                         if l in shown_licenses:
1262                                 continue
1263                         msg = ("A copy of the '%s' license" + \
1264                         " is located at '%s'.") % (l, l_path)
1265                         print msg
1266                         print
1267                         shown_licenses.add(l)
1268         return have_eapi_mask
1269
1270 class Package(Task):
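             """
             Represents an ebuild, binary, or installed package instance. Wraps
             the raw metadata and exposes derived attributes such as cp,
             slot_atom, use and iuse; hashed by (type_name, root, cpv,
             operation).
             """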
1271
1272         __hash__ = Task.__hash__
1273         __slots__ = ("built", "cpv", "depth",
1274                 "installed", "metadata", "onlydeps", "operation",
1275                 "root_config", "type_name",
1276                 "category", "counter", "cp", "cpv_split",
1277                 "inherited", "iuse", "mtime",
1278                 "pf", "pv_split", "root", "slot", "slot_atom", "use")
1279
1280         metadata_keys = [
1281                 "CHOST", "COUNTER", "DEPEND", "EAPI",
1282                 "INHERITED", "IUSE", "KEYWORDS",
1283                 "LICENSE", "PDEPEND", "PROVIDE", "RDEPEND",
1284                 "repository", "PROPERTIES", "RESTRICT", "SLOT", "USE", "_mtime_"]
1285
1286         def __init__(self, **kwargs):
1287                 Task.__init__(self, **kwargs)
1288                 self.root = self.root_config.root
1289                 self.metadata = _PackageMetadataWrapper(self, self.metadata)
1290                 self.cp = portage.cpv_getkey(self.cpv)
1291                 slot = self.slot
1292                 if not slot:
1293                         # Avoid an InvalidAtom exception when creating slot_atom.
1294                         # This package instance will be masked due to empty SLOT.
1295                         slot = '0'
1296                 self.slot_atom = portage.dep.Atom("%s:%s" % (self.cp, slot))
1297                 self.category, self.pf = portage.catsplit(self.cpv)
1298                 self.cpv_split = portage.catpkgsplit(self.cpv)
1299                 self.pv_split = self.cpv_split[1:]
1300
1301         class _use(object):
1302
1303                 __slots__ = ("__weakref__", "enabled")
1304
1305                 def __init__(self, use):
1306                         self.enabled = frozenset(use)
1307
1308         class _iuse(object):
1309
1310                 __slots__ = ("__weakref__", "all", "enabled", "disabled", "iuse_implicit", "regex", "tokens")
1311
1312                 def __init__(self, tokens, iuse_implicit):
1313                         self.tokens = tuple(tokens)
1314                         self.iuse_implicit = iuse_implicit
1315                         enabled = []
1316                         disabled = []
1317                         other = []
1318                         for x in tokens:
1319                                 prefix = x[:1]
1320                                 if prefix == "+":
1321                                         enabled.append(x[1:])
1322                                 elif prefix == "-":
1323                                         disabled.append(x[1:])
1324                                 else:
1325                                         other.append(x)
1326                         self.enabled = frozenset(enabled)
1327                         self.disabled = frozenset(disabled)
1328                         self.all = frozenset(chain(enabled, disabled, other))
1329
1330                 def __getattribute__(self, name):
1331                         if name == "regex":
1332                                 try:
1333                                         return object.__getattribute__(self, "regex")
1334                                 except AttributeError:
1335                                         all = object.__getattribute__(self, "all")
1336                                         iuse_implicit = object.__getattribute__(self, "iuse_implicit")
1337                                         # Escape anything except ".*" which is supposed
1338                                         # to pass through from _get_implicit_iuse()
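                                             # For example (names illustrative), tokens
                                             # ['+foo', '-bar'] plus an implicit 'lang_.*'
                                             # yield a pattern equivalent to
                                             # '^(foo|bar|lang_.*)$'.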
1339                                         regex = (re.escape(x) for x in chain(all, iuse_implicit))
1340                                         regex = "^(%s)$" % "|".join(regex)
1341                                         regex = regex.replace("\\.\\*", ".*")
1342                                         self.regex = re.compile(regex)
1343                         return object.__getattribute__(self, name)
1344
1345         def _get_hash_key(self):
1346                 hash_key = getattr(self, "_hash_key", None)
1347                 if hash_key is None:
1348                         if self.operation is None:
1349                                 self.operation = "merge"
1350                                 if self.onlydeps or self.installed:
1351                                         self.operation = "nomerge"
1352                         self._hash_key = \
1353                                 (self.type_name, self.root, self.cpv, self.operation)
1354                 return self._hash_key
1355
1356         def __lt__(self, other):
1357                 if other.cp != self.cp:
1358                         return False
1359                 if portage.pkgcmp(self.pv_split, other.pv_split) < 0:
1360                         return True
1361                 return False
1362
1363         def __le__(self, other):
1364                 if other.cp != self.cp:
1365                         return False
1366                 if portage.pkgcmp(self.pv_split, other.pv_split) <= 0:
1367                         return True
1368                 return False
1369
1370         def __gt__(self, other):
1371                 if other.cp != self.cp:
1372                         return False
1373                 if portage.pkgcmp(self.pv_split, other.pv_split) > 0:
1374                         return True
1375                 return False
1376
1377         def __ge__(self, other):
1378                 if other.cp != self.cp:
1379                         return False
1380                 if portage.pkgcmp(self.pv_split, other.pv_split) >= 0:
1381                         return True
1382                 return False
1383
1384 _all_metadata_keys = set(x for x in portage.auxdbkeys \
1385         if not x.startswith("UNUSED_"))
1386 _all_metadata_keys.discard("CDEPEND")
1387 _all_metadata_keys.update(Package.metadata_keys)
1388
1389 from portage.cache.mappings import slot_dict_class
1390 _PackageMetadataWrapperBase = slot_dict_class(_all_metadata_keys)
1391
1392 class _PackageMetadataWrapper(_PackageMetadataWrapperBase):
1393         """
1394         Detect metadata updates and synchronize Package attributes.
1395         """
1396
1397         __slots__ = ("_pkg",)
1398         _wrapped_keys = frozenset(
1399                 ["COUNTER", "INHERITED", "IUSE", "SLOT", "USE", "_mtime_"])
1400
1401         def __init__(self, pkg, metadata):
1402                 _PackageMetadataWrapperBase.__init__(self)
1403                 self._pkg = pkg
1404                 self.update(metadata)
1405
1406         def __setitem__(self, k, v):
1407                 _PackageMetadataWrapperBase.__setitem__(self, k, v)
1408                 if k in self._wrapped_keys:
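                             # e.g. assigning metadata["IUSE"] dispatches to _set_iuse(),
                             # which refreshes the corresponding Package attribute.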
1409                         getattr(self, "_set_" + k.lower())(k, v)
1410
1411         def _set_inherited(self, k, v):
1412                 if isinstance(v, basestring):
1413                         v = frozenset(v.split())
1414                 self._pkg.inherited = v
1415
1416         def _set_iuse(self, k, v):
1417                 self._pkg.iuse = self._pkg._iuse(
1418                         v.split(), self._pkg.root_config.iuse_implicit)
1419
1420         def _set_slot(self, k, v):
1421                 self._pkg.slot = v
1422
1423         def _set_use(self, k, v):
1424                 self._pkg.use = self._pkg._use(v.split())
1425
1426         def _set_counter(self, k, v):
1427                 if isinstance(v, basestring):
1428                         try:
1429                                 v = long(v.strip())
1430                         except ValueError:
1431                                 v = 0
1432                 self._pkg.counter = v
1433
1434         def _set__mtime_(self, k, v):
1435                 if isinstance(v, basestring):
1436                         try:
1437                                 v = long(v.strip())
1438                         except ValueError:
1439                                 v = 0
1440                 self._pkg.mtime = v
1441
1442 class PackageUninstall(AsynchronousTask):
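             """
             Uninstall a single installed package by delegating to unmerge(),
             recording the result in self.returncode (os.EX_OK on success).
             """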
1443
1444         __slots__ = ("ldpath_mtimes", "opts", "pkg", "scheduler", "settings")
1445
1446         def _start(self):
1447                 try:
1448                         unmerge(self.pkg.root_config, self.opts, "unmerge",
1449                                 [self.pkg.cpv], self.ldpath_mtimes, clean_world=0,
1450                                 clean_delay=0, raise_on_error=1, scheduler=self.scheduler,
1451                                 writemsg_level=self._writemsg_level)
1452                 except UninstallFailure, e:
1453                         self.returncode = e.status
1454                 else:
1455                         self.returncode = os.EX_OK
1456                 self.wait()
1457
1458         def _writemsg_level(self, msg, level=0, noiselevel=0):
1459
1460                 log_path = self.settings.get("PORTAGE_LOG_FILE")
1461                 background = self.background
1462
1463                 if log_path is None:
1464                         if not (background and level < logging.WARNING):
1465                                 portage.util.writemsg_level(msg,
1466                                         level=level, noiselevel=noiselevel)
1467                 else:
1468                         if not background:
1469                                 portage.util.writemsg_level(msg,
1470                                         level=level, noiselevel=noiselevel)
1471
1472                         f = open(log_path, 'a')
1473                         try:
1474                                 f.write(msg)
1475                         finally:
1476                                 f.close()
1477
1478 class MergeListItem(CompositeTask):
1479
1480         """
1481         TODO: For parallel scheduling, everything here needs asynchronous
1482         execution support (start, poll, and wait methods).
1483         """
1484
1485         __slots__ = ("args_set",
1486                 "binpkg_opts", "build_opts", "config_pool", "emerge_opts",
1487                 "find_blockers", "logger", "mtimedb", "pkg",
1488                 "pkg_count", "pkg_to_replace", "prefetcher",
1489                 "settings", "statusMessage", "world_atom") + \
1490                 ("_install_task",)
1491
1492         def _start(self):
1493
1494                 pkg = self.pkg
1495                 build_opts = self.build_opts
1496
1497                 if pkg.installed:
1498                         # uninstall,  executed by self.merge()
1499                         self.returncode = os.EX_OK
1500                         self.wait()
1501                         return
1502
1503                 args_set = self.args_set
1504                 find_blockers = self.find_blockers
1505                 logger = self.logger
1506                 mtimedb = self.mtimedb
1507                 pkg_count = self.pkg_count
1508                 scheduler = self.scheduler
1509                 settings = self.settings
1510                 world_atom = self.world_atom
1511                 ldpath_mtimes = mtimedb["ldpath"]
1512
1513                 action_desc = "Emerging"
1514                 preposition = "for"
1515                 if pkg.type_name == "binary":
1516                         action_desc += " binary"
1517
1518                 if build_opts.fetchonly:
1519                         action_desc = "Fetching"
1520
1521                 msg = "%s (%s of %s) %s" % \
1522                         (action_desc,
1523                         colorize("MERGE_LIST_PROGRESS", str(pkg_count.curval)),
1524                         colorize("MERGE_LIST_PROGRESS", str(pkg_count.maxval)),
1525                         colorize("GOOD", pkg.cpv))
1526
1527                 portdb = pkg.root_config.trees["porttree"].dbapi
1528                 portdir_repo_name = portdb._repository_map.get(portdb.porttree_root)
1529                 if portdir_repo_name:
1530                         pkg_repo_name = pkg.metadata.get("repository")
1531                         if pkg_repo_name != portdir_repo_name:
1532                                 if not pkg_repo_name:
1533                                         pkg_repo_name = "unknown repo"
1534                                 msg += " from %s" % pkg_repo_name
1535
1536                 if pkg.root != "/":
1537                         msg += " %s %s" % (preposition, pkg.root)
1538
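                     # At this point msg reads, for example, "Emerging (2 of 7)
                     # cat/pkg-1.0" or, for a binary package from another repository,
                     # "Emerging binary (2 of 7) cat/pkg-1.0 from some-repo"
                     # (with color markup around the counters and the cpv).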
1539                 if not build_opts.pretend:
1540                         self.statusMessage(msg)
1541                         logger.log(" >>> emerge (%s of %s) %s to %s" % \
1542                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
1543
1544                 if pkg.type_name == "ebuild":
1545
1546                         build = EbuildBuild(args_set=args_set,
1547                                 background=self.background,
1548                                 config_pool=self.config_pool,
1549                                 find_blockers=find_blockers,
1550                                 ldpath_mtimes=ldpath_mtimes, logger=logger,
1551                                 opts=build_opts, pkg=pkg, pkg_count=pkg_count,
1552                                 prefetcher=self.prefetcher, scheduler=scheduler,
1553                                 settings=settings, world_atom=world_atom)
1554
1555                         self._install_task = build
1556                         self._start_task(build, self._default_final_exit)
1557                         return
1558
1559                 elif pkg.type_name == "binary":
1560
1561                         binpkg = Binpkg(background=self.background,
1562                                 find_blockers=find_blockers,
1563                                 ldpath_mtimes=ldpath_mtimes, logger=logger,
1564                                 opts=self.binpkg_opts, pkg=pkg, pkg_count=pkg_count,
1565                                 prefetcher=self.prefetcher, settings=settings,
1566                                 scheduler=scheduler, world_atom=world_atom)
1567
1568                         self._install_task = binpkg
1569                         self._start_task(binpkg, self._default_final_exit)
1570                         return
1571
1572         def _poll(self):
1573                 self._install_task.poll()
1574                 return self.returncode
1575
1576         def _wait(self):
1577                 self._install_task.wait()
1578                 return self.returncode
1579
1580         def merge(self):
1581
1582                 pkg = self.pkg
1583                 build_opts = self.build_opts
1584                 find_blockers = self.find_blockers
1585                 logger = self.logger
1586                 mtimedb = self.mtimedb
1587                 pkg_count = self.pkg_count
1588                 prefetcher = self.prefetcher
1589                 scheduler = self.scheduler
1590                 settings = self.settings
1591                 world_atom = self.world_atom
1592                 ldpath_mtimes = mtimedb["ldpath"]
1593
1594                 if pkg.installed:
1595                         if not (build_opts.buildpkgonly or \
1596                                 build_opts.fetchonly or build_opts.pretend):
1597
1598                                 uninstall = PackageUninstall(background=self.background,
1599                                         ldpath_mtimes=ldpath_mtimes, opts=self.emerge_opts,
1600                                         pkg=pkg, scheduler=scheduler, settings=settings)
1601
1602                                 uninstall.start()
1603                                 retval = uninstall.wait()
1604                                 if retval != os.EX_OK:
1605                                         return retval
1606                         return os.EX_OK
1607
1608                 if build_opts.fetchonly or \
1609                         build_opts.buildpkgonly:
1610                         return self.returncode
1611
1612                 retval = self._install_task.install()
1613                 return retval
1614
1615 class BlockerDB(object):
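             """
             Answers queries about blockers involving installed packages, using
             a FakeVartree snapshot of the vardb together with a BlockerCache
             of *DEPEND blocker atoms (see findInstalledBlockers()).
             """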
1616
1617         def __init__(self, root_config):
1618                 self._root_config = root_config
1619                 self._vartree = root_config.trees["vartree"]
1620                 self._portdb = root_config.trees["porttree"].dbapi
1621
1622                 self._dep_check_trees = None
1623                 self._fake_vartree = None
1624
1625         def _get_fake_vartree(self, acquire_lock=0):
1626                 fake_vartree = self._fake_vartree
1627                 if fake_vartree is None:
1628                         fake_vartree = FakeVartree(self._root_config,
1629                                 acquire_lock=acquire_lock)
1630                         self._fake_vartree = fake_vartree
1631                         self._dep_check_trees = { self._vartree.root : {
1632                                 "porttree"    :  fake_vartree,
1633                                 "vartree"     :  fake_vartree,
1634                         }}
1635                 else:
1636                         fake_vartree.sync(acquire_lock=acquire_lock)
1637                 return fake_vartree
1638
1639         def findInstalledBlockers(self, new_pkg, acquire_lock=0):
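                     """
                     Return the set of installed packages that block new_pkg, or
                     that new_pkg itself blocks. Blocker atoms of installed
                     packages are cached in a BlockerCache keyed by cpv and
                     validated against COUNTER.
                     """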
1640                 blocker_cache = BlockerCache(self._vartree.root, self._vartree.dbapi)
1641                 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
1642                 settings = self._vartree.settings
1643                 stale_cache = set(blocker_cache)
1644                 fake_vartree = self._get_fake_vartree(acquire_lock=acquire_lock)
1645                 dep_check_trees = self._dep_check_trees
1646                 vardb = fake_vartree.dbapi
1647                 installed_pkgs = list(vardb)
1648
1649                 for inst_pkg in installed_pkgs:
1650                         stale_cache.discard(inst_pkg.cpv)
1651                         cached_blockers = blocker_cache.get(inst_pkg.cpv)
1652                         if cached_blockers is not None and \
1653                                 cached_blockers.counter != long(inst_pkg.metadata["COUNTER"]):
1654                                 cached_blockers = None
1655                         if cached_blockers is not None:
1656                                 blocker_atoms = cached_blockers.atoms
1657                         else:
1658                                 # Use aux_get() to trigger FakeVartree global
1659                                 # updates on *DEPEND when appropriate.
1660                                 depstr = " ".join(vardb.aux_get(inst_pkg.cpv, dep_keys))
1661                                 try:
1662                                         portage.dep._dep_check_strict = False
1663                                         success, atoms = portage.dep_check(depstr,
1664                                                 vardb, settings, myuse=inst_pkg.use.enabled,
1665                                                 trees=dep_check_trees, myroot=inst_pkg.root)
1666                                 finally:
1667                                         portage.dep._dep_check_strict = True
1668                                 if not success:
1669                                         pkg_location = os.path.join(inst_pkg.root,
1670                                                 portage.VDB_PATH, inst_pkg.category, inst_pkg.pf)
1671                                         portage.writemsg("!!! %s/*DEPEND: %s\n" % \
1672                                                 (pkg_location, atoms), noiselevel=-1)
1673                                         continue
1674
1675                                 blocker_atoms = [atom for atom in atoms \
1676                                         if atom.startswith("!")]
1677                                 blocker_atoms.sort()
1678                                 counter = long(inst_pkg.metadata["COUNTER"])
1679                                 blocker_cache[inst_pkg.cpv] = \
1680                                         blocker_cache.BlockerData(counter, blocker_atoms)
1681                 for cpv in stale_cache:
1682                         del blocker_cache[cpv]
1683                 blocker_cache.flush()
1684
1685                 blocker_parents = digraph()
1686                 blocker_atoms = []
1687                 for pkg in installed_pkgs:
1688                         for blocker_atom in blocker_cache[pkg.cpv].atoms:
1689                                 blocker_atom = blocker_atom.lstrip("!")
1690                                 blocker_atoms.append(blocker_atom)
1691                                 blocker_parents.add(blocker_atom, pkg)
1692
1693                 blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
1694                 blocking_pkgs = set()
1695                 for atom in blocker_atoms.iterAtomsForPackage(new_pkg):
1696                         blocking_pkgs.update(blocker_parents.parent_nodes(atom))
1697
1698                 # Check for blockers in the other direction.
1699                 depstr = " ".join(new_pkg.metadata[k] for k in dep_keys)
1700                 try:
1701                         portage.dep._dep_check_strict = False
1702                         success, atoms = portage.dep_check(depstr,
1703                                 vardb, settings, myuse=new_pkg.use.enabled,
1704                                 trees=dep_check_trees, myroot=new_pkg.root)
1705                 finally:
1706                         portage.dep._dep_check_strict = True
1707                 if not success:
1708                         # We should never get this far with invalid deps.
1709                         show_invalid_depstring_notice(new_pkg, depstr, atoms)
1710                         assert False
1711
1712                 blocker_atoms = [atom.lstrip("!") for atom in atoms \
1713                         if atom[:1] == "!"]
1714                 if blocker_atoms:
1715                         blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
1716                         for inst_pkg in installed_pkgs:
1717                                 try:
1718                                         blocker_atoms.iterAtomsForPackage(inst_pkg).next()
1719                                 except (portage.exception.InvalidDependString, StopIteration):
1720                                         continue
1721                                 blocking_pkgs.add(inst_pkg)
1722
1723                 return blocking_pkgs
1724
1725 def show_invalid_depstring_notice(parent_node, depstring, error_msg):
1726
1727         msg1 = "\n\n!!! Invalid or corrupt dependency specification: " + \
1728                 "\n\n%s\n\n%s\n\n%s\n\n" % (error_msg, parent_node, depstring)
1729         p_type, p_root, p_key, p_status = parent_node
1730         msg = []
1731         if p_status == "nomerge":
1732                 category, pf = portage.catsplit(p_key)
1733                 pkg_location = os.path.join(p_root, portage.VDB_PATH, category, pf)
1734                 msg.append("Portage is unable to process the dependencies of the ")
1735                 msg.append("'%s' package. " % p_key)
1736                 msg.append("In order to correct this problem, the package ")
1737                 msg.append("should be uninstalled, reinstalled, or upgraded. ")
1738                 msg.append("As a temporary workaround, the --nodeps option can ")
1739                 msg.append("be used to ignore all dependencies.  For reference, ")
1740                 msg.append("the problematic dependencies can be found in the ")
1741                 msg.append("*DEPEND files located in '%s/'." % pkg_location)
1742         else:
1743                 msg.append("This package can not be installed. ")
1744                 msg.append("Please notify the '%s' package maintainer " % p_key)
1745                 msg.append("about this problem.")
1746
1747         msg2 = "".join("%s\n" % line for line in textwrap.wrap("".join(msg), 72))
1748         writemsg_level(msg1 + msg2, level=logging.ERROR, noiselevel=-1)
1749
1750 class depgraph(object):
1751
1752         pkg_tree_map = RootConfig.pkg_tree_map
1753
1754         _dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
1755
1756         def __init__(self, settings, trees, myopts, myparams, spinner):
1757                 self.settings = settings
1758                 self.target_root = settings["ROOT"]
1759                 self.myopts = myopts
1760                 self.myparams = myparams
1761                 self.edebug = 0
1762                 if settings.get("PORTAGE_DEBUG", "") == "1":
1763                         self.edebug = 1
1764                 self.spinner = spinner
1765                 self._running_root = trees["/"]["root_config"]
1766                 self._opts_no_restart = Scheduler._opts_no_restart
1767                 self.pkgsettings = {}
1768                 # Maps slot atom to package for each Package added to the graph.
1769                 self._slot_pkg_map = {}
1770                 # Maps nodes to the reasons they were selected for reinstallation.
1771                 self._reinstall_nodes = {}
1772                 self.mydbapi = {}
1773                 self.trees = {}
1774                 self._trees_orig = trees
1775                 self.roots = {}
1776                 # Contains a filtered view of preferred packages that are selected
1777                 # from available repositories.
1778                 self._filtered_trees = {}
1779                 # Contains installed packages and new packages that have been added
1780                 # to the graph.
1781                 self._graph_trees = {}
1782                 # All Package instances
1783                 self._pkg_cache = {}
1784                 for myroot in trees:
1785                         self.trees[myroot] = {}
1786                         # Create a RootConfig instance that references
1787                         # the FakeVartree instead of the real one.
1788                         self.roots[myroot] = RootConfig(
1789                                 trees[myroot]["vartree"].settings,
1790                                 self.trees[myroot],
1791                                 trees[myroot]["root_config"].setconfig)
1792                         for tree in ("porttree", "bintree"):
1793                                 self.trees[myroot][tree] = trees[myroot][tree]
1794                         self.trees[myroot]["vartree"] = \
1795                                 FakeVartree(trees[myroot]["root_config"],
1796                                         pkg_cache=self._pkg_cache)
1797                         self.pkgsettings[myroot] = portage.config(
1798                                 clone=self.trees[myroot]["vartree"].settings)
1799                         self._slot_pkg_map[myroot] = {}
1800                         vardb = self.trees[myroot]["vartree"].dbapi
1801                         preload_installed_pkgs = "--nodeps" not in self.myopts and \
1802                                 "--buildpkgonly" not in self.myopts
1803                         # This fakedbapi instance will model the state that the vdb will
1804                         # have after new packages have been installed.
1805                         fakedb = PackageVirtualDbapi(vardb.settings)
1806                         if preload_installed_pkgs:
1807                                 for pkg in vardb:
1808                                         self.spinner.update()
1809                                         # This triggers metadata updates via FakeVartree.
1810                                         vardb.aux_get(pkg.cpv, [])
1811                                         fakedb.cpv_inject(pkg)
1812
1813                         # Now that the vardb state is cached in our FakeVartree,
1814                         # we won't be needing the real vartree cache for awhile.
1815                         # To make some room on the heap, clear the vardbapi
1816                         # caches.
1817                         trees[myroot]["vartree"].dbapi._clear_cache()
1818                         gc.collect()
1819
1820                         self.mydbapi[myroot] = fakedb
1821                         def graph_tree():
1822                                 pass
1823                         graph_tree.dbapi = fakedb
1824                         self._graph_trees[myroot] = {}
1825                         self._filtered_trees[myroot] = {}
1826                         # Substitute the graph tree for the vartree in dep_check() since we
1827                         # want atom selections to be consistent with package selections
1828                         # that have already been made.
1829                         self._graph_trees[myroot]["porttree"]   = graph_tree
1830                         self._graph_trees[myroot]["vartree"]    = graph_tree
1831                         def filtered_tree():
1832                                 pass
1833                         filtered_tree.dbapi = self._dep_check_composite_db(self, myroot)
1834                         self._filtered_trees[myroot]["porttree"] = filtered_tree
1835
1836                         # Passing in graph_tree as the vartree here could lead to better
1837                         # atom selections in some cases by causing atoms for packages that
1838                         # have been added to the graph to be preferred over other choices.
1839                         # However, it can trigger atom selections that result in
1840                         # unresolvable direct circular dependencies. For example, this
1841                         # happens with gwydion-dylan which depends on either itself or
1842                         # gwydion-dylan-bin. In case gwydion-dylan is not yet installed,
1843                         # gwydion-dylan-bin needs to be selected in order to avoid an
1844                         # unresolvable direct circular dependency.
1845                         #
1846                         # To solve the problem described above, pass in "graph_db" so that
1847                         # packages that have been added to the graph are distinguishable
1848                         # from other available packages and installed packages. Also, pass
1849                         # the parent package into self._select_atoms() calls so that
1850                         # unresolvable direct circular dependencies can be detected and
1851                         # avoided when possible.
1852                         self._filtered_trees[myroot]["graph_db"] = graph_tree.dbapi
1853                         self._filtered_trees[myroot]["vartree"] = self.trees[myroot]["vartree"]
1854
1855                         dbs = []
1856                         portdb = self.trees[myroot]["porttree"].dbapi
1857                         bindb  = self.trees[myroot]["bintree"].dbapi
1858                         vardb  = self.trees[myroot]["vartree"].dbapi
1859                         #               (db, pkg_type, built, installed, db_keys)
1860                         if "--usepkgonly" not in self.myopts:
1861                                 db_keys = list(portdb._aux_cache_keys)
1862                                 dbs.append((portdb, "ebuild", False, False, db_keys))
1863                         if "--usepkg" in self.myopts:
1864                                 db_keys = list(bindb._aux_cache_keys)
1865                                 dbs.append((bindb,  "binary", True, False, db_keys))
1866                         db_keys = list(trees[myroot]["vartree"].dbapi._aux_cache_keys)
1867                         dbs.append((vardb, "installed", True, True, db_keys))
1868                         self._filtered_trees[myroot]["dbs"] = dbs
1869                         if "--usepkg" in self.myopts:
1870                                 self.trees[myroot]["bintree"].populate(
1871                                         "--getbinpkg" in self.myopts,
1872                                         "--getbinpkgonly" in self.myopts)
1873                 del trees
1874
1875                 self.digraph=portage.digraph()
1876                 # contains all sets added to the graph
1877                 self._sets = {}
1878                 # contains atoms given as arguments
1879                 self._sets["args"] = InternalPackageSet()
1880                 # contains all atoms from all sets added to the graph, including
1881                 # atoms given as arguments
1882                 self._set_atoms = InternalPackageSet()
1883                 self._atom_arg_map = {}
1884                 # contains all nodes pulled in by self._set_atoms
1885                 self._set_nodes = set()
1886                 # Contains only Blocker -> Uninstall edges
1887                 self._blocker_uninstalls = digraph()
1888                 # Contains only Package -> Blocker edges
1889                 self._blocker_parents = digraph()
1890                 # Contains only irrelevant Package -> Blocker edges
1891                 self._irrelevant_blockers = digraph()
1892                 # Contains only unsolvable Package -> Blocker edges
1893                 self._unsolvable_blockers = digraph()
1894                 # Contains all Blocker -> Blocked Package edges
1895                 self._blocked_pkgs = digraph()
1896                 # Contains world packages that have been protected from
1897                 # uninstallation but may not have been added to the graph
1898                 # if the graph is not complete yet.
1899                 self._blocked_world_pkgs = {}
1900                 self._slot_collision_info = {}
1901                 # Slot collision nodes are not allowed to block other packages since
1902                 # blocker validation is only able to account for one package per slot.
1903                 self._slot_collision_nodes = set()
1904                 self._parent_atoms = {}
1905                 self._slot_conflict_parent_atoms = set()
1906                 self._serialized_tasks_cache = None
1907                 self._scheduler_graph = None
1908                 self._displayed_list = None
1909                 self._pprovided_args = []
1910                 self._missing_args = []
1911                 self._masked_installed = set()
1912                 self._unsatisfied_deps_for_display = []
1913                 self._unsatisfied_blockers_for_display = None
1914                 self._circular_deps_for_display = None
1915                 self._dep_stack = []
1916                 self._dep_disjunctive_stack = []
1917                 self._unsatisfied_deps = []
1918                 self._initially_unsatisfied_deps = []
1919                 self._ignored_deps = []
1920                 self._required_set_names = set(["system", "world"])
1921                 self._select_atoms = self._select_atoms_highest_available
1922                 self._select_package = self._select_pkg_highest_available
1923                 self._highest_pkg_cache = {}
1924
1925         def _show_slot_collision_notice(self):
1926                 """Show an informational message advising the user to mask one of
1927                 the packages. In some cases it may be possible to resolve this
1928                 automatically, but support for backtracking (removal of nodes that
1929                 have already been selected) will be required in order to handle all
1930                 possible cases.
1931                 """
1932
1933                 if not self._slot_collision_info:
1934                         return
1935
1936                 self._show_merge_list()
1937
1938                 msg = []
1939                 msg.append("\n!!! Multiple package instances within a single " + \
1940                         "package slot have been pulled\n")
1941                 msg.append("!!! into the dependency graph, resulting" + \
1942                         " in a slot conflict:\n\n")
1943                 indent = "  "
1944                 # Max number of parents shown, to avoid flooding the display.
1945                 max_parents = 3
1946                 explanation_columns = 70
1947                 explanations = 0
1948                 for (slot_atom, root), slot_nodes \
1949                         in self._slot_collision_info.iteritems():
1950                         msg.append(str(slot_atom))
1951                         msg.append("\n\n")
1952
1953                         for node in slot_nodes:
1954                                 msg.append(indent)
1955                                 msg.append(str(node))
1956                                 parent_atoms = self._parent_atoms.get(node)
1957                                 if parent_atoms:
1958                                         pruned_list = set()
1959                                         # Prefer conflict atoms over others.
1960                                         for parent_atom in parent_atoms:
1961                                                 if len(pruned_list) >= max_parents:
1962                                                         break
1963                                                 if parent_atom in self._slot_conflict_parent_atoms:
1964                                                         pruned_list.add(parent_atom)
1965
1966                                         # If this package was pulled in by conflict atoms then
1967                                         # show those alone since those are the most interesting.
1968                                         if not pruned_list:
1969                                                 # When generating the pruned list, prefer instances
1970                                                 # of DependencyArg over instances of Package.
1971                                                 for parent_atom in parent_atoms:
1972                                                         if len(pruned_list) >= max_parents:
1973                                                                 break
1974                                                         parent, atom = parent_atom
1975                                                         if isinstance(parent, DependencyArg):
1976                                                                 pruned_list.add(parent_atom)
1977                                                 # Prefer Packages instances that themselves have been
1978                                                 # pulled into collision slots.
1979                                                 for parent_atom in parent_atoms:
1980                                                         if len(pruned_list) >= max_parents:
1981                                                                 break
1982                                                         parent, atom = parent_atom
1983                                                         if isinstance(parent, Package) and \
1984                                                                 (parent.slot_atom, parent.root) \
1985                                                                 in self._slot_collision_info:
1986                                                                 pruned_list.add(parent_atom)
1987                                                 for parent_atom in parent_atoms:
1988                                                         if len(pruned_list) >= max_parents:
1989                                                                 break
1990                                                         pruned_list.add(parent_atom)
1991                                         omitted_parents = len(parent_atoms) - len(pruned_list)
1992                                         parent_atoms = pruned_list
1993                                         msg.append(" pulled in by\n")
1994                                         for parent_atom in parent_atoms:
1995                                                 parent, atom = parent_atom
1996                                                 msg.append(2*indent)
1997                                                 if isinstance(parent,
1998                                                         (PackageArg, AtomArg)):
1999                                                         # For PackageArg and AtomArg types, it's
2000                                                         # redundant to display the atom attribute.
2001                                                         msg.append(str(parent))
2002                                                 else:
2003                                                         # Display the specific atom from SetArg or
2004                                                         # Package types.
2005                                                         msg.append("%s required by %s" % (atom, parent))
2006                                                 msg.append("\n")
2007                                         if omitted_parents:
2008                                                 msg.append(2*indent)
2009                                                 msg.append("(and %d more)\n" % omitted_parents)
2010                                 else:
2011                                         msg.append(" (no parents)\n")
2012                                 msg.append("\n")
2013                         explanation = self._slot_conflict_explanation(slot_nodes)
2014                         if explanation:
2015                                 explanations += 1
2016                                 msg.append(indent + "Explanation:\n\n")
2017                                 for line in textwrap.wrap(explanation, explanation_columns):
2018                                         msg.append(2*indent + line + "\n")
2019                                 msg.append("\n")
2020                 msg.append("\n")
2021                 sys.stderr.write("".join(msg))
2022                 sys.stderr.flush()
2023
2024                 explanations_for_all = explanations == len(self._slot_collision_info)
2025
2026                 if explanations_for_all or "--quiet" in self.myopts:
2027                         return
2028
2029                 msg = []
2030                 msg.append("It may be possible to solve this problem ")
2031                 msg.append("by using package.mask to prevent one of ")
2032                 msg.append("those packages from being selected. ")
2033                 msg.append("However, it is also possible that conflicting ")
2034                 msg.append("dependencies exist such that they are impossible to ")
2035                 msg.append("satisfy simultaneously.  If such a conflict exists in ")
2036                 msg.append("the dependencies of two different packages, then those ")
2037                 msg.append("packages can not be installed simultaneously.")
2038
2039                 from formatter import AbstractFormatter, DumbWriter
2040                 f = AbstractFormatter(DumbWriter(sys.stderr, maxcol=72))
2041                 for x in msg:
2042                         f.add_flowing_data(x)
2043                 f.end_paragraph(1)
2044
2045                 msg = []
2046                 msg.append("For more information, see MASKED PACKAGES ")
2047                 msg.append("section in the emerge man page or refer ")
2048                 msg.append("to the Gentoo Handbook.")
2049                 for x in msg:
2050                         f.add_flowing_data(x)
2051                 f.end_paragraph(1)
2052                 f.writer.flush()
2053
2054         def _slot_conflict_explanation(self, slot_nodes):
2055                 """
2056                 When a slot conflict occurs due to USE deps, there are a few
2057                 different cases to consider:
2058
2059                 1) New USE are correctly set but --newuse wasn't requested so an
2060                    installed package with incorrect USE happened to get pulled
2061                    into the graph before the new one.
2062
2063                 2) New USE are incorrectly set but an installed package has correct
2064                    USE so it got pulled into the graph, and a new instance also got
2065                    pulled in due to --newuse or an upgrade.
2066
2067                 3) Multiple USE deps exist that can't be satisfied simultaneously,
2068                    and multiple package instances got pulled into the same slot to
2069                    satisfy the conflicting deps.
2070
2071                 Currently, explanations and suggested courses of action are generated
2072                 for cases 1 and 2. Case 3 is too complex to give a useful suggestion.
2073                 """
2074
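                     # Illustrative example of case 1 (package names hypothetical):
                     # an installed cat/foo-1.0 built with USE="-ssl" is in the
                     # graph while a parent now requires cat/foo[ssl]; a rebuild of
                     # the same version with current USE would satisfy that atom,
                     # so the suggestion is simply --newuse or an explicit
                     # reinstall.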
2075                 if len(slot_nodes) != 2:
2076                         # Suggestions are only implemented for
2077                         # conflicts between two packages.
2078                         return None
2079
2080                 all_conflict_atoms = self._slot_conflict_parent_atoms
2081                 matched_node = None
2082                 matched_atoms = None
2083                 unmatched_node = None
2084                 for node in slot_nodes:
2085                         parent_atoms = self._parent_atoms.get(node)
2086                         if not parent_atoms:
2087                                 # Normally, there are always parent atoms. If there are
2088                                 # none then something unexpected is happening and there's
2089                                 # currently no suggestion for this case.
2090                                 return None
2091                         conflict_atoms = all_conflict_atoms.intersection(parent_atoms)
2092                         for parent_atom in conflict_atoms:
2093                                 parent, atom = parent_atom
2094                                 if not atom.use:
2095                                         # Suggestions are currently only implemented for cases
2096                                         # in which all conflict atoms have USE deps.
2097                                         return None
2098                         if conflict_atoms:
2099                                 if matched_node is not None:
2100                                         # If conflict atoms match multiple nodes
2101                                         # then there's no suggestion.
2102                                         return None
2103                                 matched_node = node
2104                                 matched_atoms = conflict_atoms
2105                         else:
2106                                 if unmatched_node is not None:
2107                                         # Neither node is matched by conflict atoms, and
2108                                         # there is no suggestion for this case.
2109                                         return None
2110                                 unmatched_node = node
2111
2112                 if matched_node is None or unmatched_node is None:
2113                         # This shouldn't happen.
2114                         return None
2115
2116                 if unmatched_node.installed and not matched_node.installed and \
2117                         unmatched_node.cpv == matched_node.cpv:
2118                         # If the conflicting packages are the same version then
2119                         # --newuse should be all that's needed. If they are different
2120                         # versions then there's some other problem.
2121                         return "New USE are correctly set, but --newuse wasn't" + \
2122                                 " requested, so an installed package with incorrect USE " + \
2123                                 "happened to get pulled into the dependency graph. " + \
2124                                 "In order to solve " + \
2125                                 "this, either specify the --newuse option or explicitly " + \
2126                                 "reinstall '%s'." % matched_node.slot_atom
2127
2128                 if matched_node.installed and not unmatched_node.installed:
2129                         atoms = sorted(set(atom for parent, atom in matched_atoms))
2130                         explanation = ("New USE for '%s' are incorrectly set. " + \
2131                                 "In order to solve this, adjust USE to satisfy '%s'") % \
2132                                 (matched_node.slot_atom, atoms[0])
2133                         if len(atoms) > 1:
2134                                 for atom in atoms[1:-1]:
2135                                         explanation += ", '%s'" % (atom,)
2136                                 if len(atoms) > 2:
2137                                         explanation += ","
2138                                 explanation += " and '%s'" % (atoms[-1],)
2139                         explanation += "."
2140                         return explanation
2141
2142                 return None
2143
2144         def _process_slot_conflicts(self):
2145                 """
2146                 Process slot conflict data to identify specific atoms which
2147                 lead to conflict. These atoms only match a subset of the
2148                 packages that have been pulled into a given slot.
2149                 """
2150                 for (slot_atom, root), slot_nodes \
2151                         in self._slot_collision_info.iteritems():
2152
2153                         all_parent_atoms = set()
2154                         for pkg in slot_nodes:
2155                                 parent_atoms = self._parent_atoms.get(pkg)
2156                                 if not parent_atoms:
2157                                         continue
2158                                 all_parent_atoms.update(parent_atoms)
2159
2160                         for pkg in slot_nodes:
2161                                 parent_atoms = self._parent_atoms.get(pkg)
2162                                 if parent_atoms is None:
2163                                         parent_atoms = set()
2164                                         self._parent_atoms[pkg] = parent_atoms
2165                                 for parent_atom in all_parent_atoms:
2166                                         if parent_atom in parent_atoms:
2167                                                 continue
2168                                         # Use package set for matching since it will match via
2169                                         # PROVIDE when necessary, while match_from_list does not.
2170                                         parent, atom = parent_atom
2171                                         atom_set = InternalPackageSet(
2172                                                 initial_atoms=(atom,))
2173                                         if atom_set.findAtomForPackage(pkg):
2174                                                 parent_atoms.add(parent_atom)
2175                                         else:
2176                                                 self._slot_conflict_parent_atoms.add(parent_atom)
2177
2178         def _reinstall_for_flags(self, forced_flags,
2179                 orig_use, orig_iuse, cur_use, cur_iuse):
2180                 """Return a set of flags that trigger reinstallation, or None if there
2181                 are no such flags."""
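                     # For example, if IUSE is {a, b} in both the installed and the
                     # new version, no flags are forced, and USE changes from {a} to
                     # {b}, then both "a" and "b" are returned under --newuse or
                     # when --reinstall is set to changed-use, since each flag's
                     # effective state differs between the two versions.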
2182                 if "--newuse" in self.myopts:
2183                         flags = set(orig_iuse.symmetric_difference(
2184                                 cur_iuse).difference(forced_flags))
2185                         flags.update(orig_iuse.intersection(orig_use).symmetric_difference(
2186                                 cur_iuse.intersection(cur_use)))
2187                         if flags:
2188                                 return flags
2189                 elif "changed-use" == self.myopts.get("--reinstall"):
2190                         flags = orig_iuse.intersection(orig_use).symmetric_difference(
2191                                 cur_iuse.intersection(cur_use))
2192                         if flags:
2193                                 return flags
2194                 return None
2195
2196         def _create_graph(self, allow_unsatisfied=False):
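                     """
                     Drain self._dep_stack and self._dep_disjunctive_stack, adding each
                     queued Package or Dependency to the graph until both stacks are
                     empty. Returns 1 on success and 0 if any dependency could not be
                     added.
                     """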
2197                 dep_stack = self._dep_stack
2198                 dep_disjunctive_stack = self._dep_disjunctive_stack
2199                 while dep_stack or dep_disjunctive_stack:
2200                         self.spinner.update()
2201                         while dep_stack:
2202                                 dep = dep_stack.pop()
2203                                 if isinstance(dep, Package):
2204                                         if not self._add_pkg_deps(dep,
2205                                                 allow_unsatisfied=allow_unsatisfied):
2206                                                 return 0
2207                                         continue
2208                                 if not self._add_dep(dep, allow_unsatisfied=allow_unsatisfied):
2209                                         return 0
2210                         if dep_disjunctive_stack:
2211                                 if not self._pop_disjunction(allow_unsatisfied):
2212                                         return 0
2213                 return 1
2214
2215         def _add_dep(self, dep, allow_unsatisfied=False):
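                     """
                     Add a single Dependency to the graph. Blockers are recorded as
                     Blocker instances for later validation rather than being resolved
                     here. For normal deps a matching package is selected and passed
                     to _add_pkg(). Returns 1 on success and 0 if no matching package
                     can be found (unless the dep is optional, or allow_unsatisfied is
                     True, in which case it is queued in self._unsatisfied_deps).
                     """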
2216                 debug = "--debug" in self.myopts
2217                 buildpkgonly = "--buildpkgonly" in self.myopts
2218                 nodeps = "--nodeps" in self.myopts
2219                 empty = "empty" in self.myparams
2220                 deep = "deep" in self.myparams
2221                 update = "--update" in self.myopts and dep.depth <= 1
2222                 if dep.blocker:
2223                         if not buildpkgonly and \
2224                                 not nodeps and \
2225                                 dep.parent not in self._slot_collision_nodes:
2226                                 if dep.parent.onlydeps:
2227                                         # It's safe to ignore blockers if the
2228                                         # parent is an --onlydeps node.
2229                                         return 1
2230                                 # The blocker applies to the root where
2231                                 # the parent is or will be installed.
2232                                 blocker = Blocker(atom=dep.atom,
2233                                         eapi=dep.parent.metadata["EAPI"],
2234                                         root=dep.parent.root)
2235                                 self._blocker_parents.add(blocker, dep.parent)
2236                         return 1
2237                 dep_pkg, existing_node = self._select_package(dep.root, dep.atom,
2238                         onlydeps=dep.onlydeps)
2239                 if not dep_pkg:
2240                         if dep.priority.optional:
2241                                 # This could be an unnecessary build-time dep
2242                                 # pulled in by --with-bdeps=y.
2243                                 return 1
2244                         if allow_unsatisfied:
2245                                 self._unsatisfied_deps.append(dep)
2246                                 return 1
2247                         self._unsatisfied_deps_for_display.append(
2248                                 ((dep.root, dep.atom), {"myparent":dep.parent}))
2249                         return 0
2250                 # In some cases, dep_check will return deps that shouldn't
2251                 # be processed any further, so they are identified and
2252                 # discarded here. Try to discard as few as possible since
2253                 # discarded dependencies reduce the amount of information
2254                 # available for optimization of merge order.
2255                 if dep.priority.satisfied and \
2256                         not dep_pkg.installed and \
2257                         not (existing_node or empty or deep or update):
2258                         myarg = None
2259                         if dep.root == self.target_root:
2260                                 try:
2261                                         myarg = self._iter_atoms_for_pkg(dep_pkg).next()
2262                                 except StopIteration:
2263                                         pass
2264                                 except portage.exception.InvalidDependString:
2265                                         if not dep_pkg.installed:
2266                                                 # This shouldn't happen since the package
2267                                                 # should have been masked.
2268                                                 raise
2269                         if not myarg:
2270                                 self._ignored_deps.append(dep)
2271                                 return 1
2272
2273                 if not self._add_pkg(dep_pkg, dep):
2274                         return 0
2275                 return 1
2276
2277         def _add_pkg(self, pkg, dep):
2278                 myparent = None
2279                 priority = None
2280                 depth = 0
2281                 if dep is None:
2282                         dep = Dependency()
2283                 else:
2284                         myparent = dep.parent
2285                         priority = dep.priority
2286                         depth = dep.depth
2287                 if priority is None:
2288                         priority = DepPriority()
2289                 # Fills the digraph with nodes comprised of packages to merge.
2290                 # pkg is the Package instance to add, and dep is the Dependency
2291                 # that pulled it in (dep.parent is the depending package, or
2292                 # None when pkg was requested directly on the command line).
2293                 # Packages pulled in for --onlydeps are still added to the
2294                 # digraph so that parent/child relationships are known, but
2295                 # they are not registered for merging.
2296                 # Stuff to add:
2297                 # SLOT-aware emerge
2298                 # IUSE-aware emerge -> USE DEP aware depgraph
2299                 # "no downgrade" emerge
2300                 # Ensure that the dependencies of the same package
2301                 # are never processed more than once.
2302                 previously_added = pkg in self.digraph
2303
2304                 # select the correct /var database that we'll be checking against
2305                 vardbapi = self.trees[pkg.root]["vartree"].dbapi
2306                 pkgsettings = self.pkgsettings[pkg.root]
2307
2308                 arg_atoms = None
2310                 try:
2311                         arg_atoms = list(self._iter_atoms_for_pkg(pkg))
2312                 except portage.exception.InvalidDependString, e:
2313                         if not pkg.installed:
2314                                 show_invalid_depstring_notice(
2315                                         pkg, pkg.metadata["PROVIDE"], str(e))
2316                                 return 0
2317                         del e
2318
2319                 if not pkg.onlydeps:
2320                         if not pkg.installed and \
2321                                 "empty" not in self.myparams and \
2322                                 vardbapi.match(pkg.slot_atom):
2323                                 # Increase the priority of dependencies on packages that
2324                                 # are being rebuilt. This optimizes merge order so that
2325                                 # dependencies are rebuilt/updated as soon as possible,
2326                                 # which is needed especially when emerge is called by
2327                                 # revdep-rebuild since dependencies may be affected by ABI
2328                                 # breakage that has rendered them useless. Don't adjust
2329                                 # priority here when in "empty" mode since all packages
2330                                 # are being merged in that case.
2331                                 priority.rebuild = True
2332
2333                         existing_node = self._slot_pkg_map[pkg.root].get(pkg.slot_atom)
2334                         slot_collision = False
2335                         if existing_node:
2336                                 existing_node_matches = pkg.cpv == existing_node.cpv
2337                                 if existing_node_matches and \
2338                                         pkg != existing_node and \
2339                                         dep.atom is not None:
2340                                         # Use package set for matching since it will match via
2341                                         # PROVIDE when necessary, while match_from_list does not.
2342                                         atom_set = InternalPackageSet(initial_atoms=[dep.atom])
2343                                         if not atom_set.findAtomForPackage(existing_node):
2344                                                 existing_node_matches = False
2345                                 if existing_node_matches:
2346                                         # The existing node can be reused.
2347                                         if arg_atoms:
2348                                                 for parent_atom in arg_atoms:
2349                                                         parent, atom = parent_atom
2350                                                         self.digraph.add(existing_node, parent,
2351                                                                 priority=priority)
2352                                                         self._add_parent_atom(existing_node, parent_atom)
2353                                         # If a direct circular dependency is not an unsatisfied
2354                                         # buildtime dependency then drop it here since otherwise
2355                                         # it can skew the merge order calculation in an unwanted
2356                                         # way.
2357                                         if existing_node != myparent or \
2358                                                 (priority.buildtime and not priority.satisfied):
2359                                                 self.digraph.addnode(existing_node, myparent,
2360                                                         priority=priority)
2361                                                 if dep.atom is not None and dep.parent is not None:
2362                                                         self._add_parent_atom(existing_node,
2363                                                                 (dep.parent, dep.atom))
2364                                         return 1
2365                                 else:
2366
2367                                         # A slot collision has occurred.  Sometimes this coincides
2368                                         # with unresolvable blockers, so the slot collision will be
2369                                         # shown later if there are no unresolvable blockers.
2370                                         self._add_slot_conflict(pkg)
2371                                         slot_collision = True
2372
2373                         if slot_collision:
2374                                 # Now add this node to the graph so that self.display()
2375                                 # can show use flags and --tree output.  This node is
2376                                 # only being partially added to the graph.  It must not be
2377                                 # allowed to interfere with the other nodes that have been
2378                                 # added.  Do not overwrite data for existing nodes in
2379                                 # self.mydbapi since that data will be used for blocker
2380                                 # validation.
2381                                 # Even though the graph is now invalid, continue to process
2382                                 # dependencies so that things like --fetchonly can still
2383                                 # function despite collisions.
2384                                 pass
2385                         elif not previously_added:
2386                                 self._slot_pkg_map[pkg.root][pkg.slot_atom] = pkg
2387                                 self.mydbapi[pkg.root].cpv_inject(pkg)
2388                                 self._filtered_trees[pkg.root]["porttree"].dbapi._clear_cache()
2389
2390                         if not pkg.installed:
2391                                 # Allow this package to satisfy old-style virtuals in case it
2392                                 # doesn't already. Any pre-existing providers will be preferred
2393                                 # over this one.
2394                                 try:
2395                                         pkgsettings.setinst(pkg.cpv, pkg.metadata)
2396                                         # For consistency, also update the global virtuals.
2397                                         settings = self.roots[pkg.root].settings
2398                                         settings.unlock()
2399                                         settings.setinst(pkg.cpv, pkg.metadata)
2400                                         settings.lock()
2401                                 except portage.exception.InvalidDependString, e:
2402                                         show_invalid_depstring_notice(
2403                                                 pkg, pkg.metadata["PROVIDE"], str(e))
2404                                         del e
2405                                         return 0
2406
2407                 if arg_atoms:
2408                         self._set_nodes.add(pkg)
2409
2410                 # Do this even for --onlydeps packages so that the
2411                 # parent/child relationship is always known in case
2412                 # self._show_slot_collision_notice() needs to be called later.
2413                 self.digraph.add(pkg, myparent, priority=priority)
2414                 if dep.atom is not None and dep.parent is not None:
2415                         self._add_parent_atom(pkg, (dep.parent, dep.atom))
2416
2417                 if arg_atoms:
2418                         for parent_atom in arg_atoms:
2419                                 parent, atom = parent_atom
2420                                 self.digraph.add(pkg, parent, priority=priority)
2421                                 self._add_parent_atom(pkg, parent_atom)
2422
2423                 """ This section determines whether we go deeper into dependencies or not.
2424                     We want to go deeper on a few occasions:
2425                     Installing package A, we need to make sure package A's deps are met.
2426                     emerge --deep <pkgspec>; we need to recursively check dependencies of pkgspec
2427                     If we are in --nodeps (no recursion) mode, we only check one level of dependencies.
2428                 """
2429                 dep_stack = self._dep_stack
2430                 if "recurse" not in self.myparams:
2431                         return 1
2432                 elif pkg.installed and \
2433                         "deep" not in self.myparams:
2434                         dep_stack = self._ignored_deps
2435
2436                 self.spinner.update()
2437
2438                 if arg_atoms:
2439                         depth = 0
2440                 pkg.depth = depth
2441                 if not previously_added:
2442                         dep_stack.append(pkg)
2443                 return 1
2444
2445         def _add_parent_atom(self, pkg, parent_atom):
2446                 parent_atoms = self._parent_atoms.get(pkg)
2447                 if parent_atoms is None:
2448                         parent_atoms = set()
2449                         self._parent_atoms[pkg] = parent_atoms
2450                 parent_atoms.add(parent_atom)
2451
2452         def _add_slot_conflict(self, pkg):
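                     """
                     Record a slot collision for pkg: both pkg and the package that
                     already occupies the same slot/root are tracked in
                     self._slot_collision_info, and pkg is added to
                     self._slot_collision_nodes.
                     """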
2453                 self._slot_collision_nodes.add(pkg)
2454                 slot_key = (pkg.slot_atom, pkg.root)
2455                 slot_nodes = self._slot_collision_info.get(slot_key)
2456                 if slot_nodes is None:
2457                         slot_nodes = set()
2458                         slot_nodes.add(self._slot_pkg_map[pkg.root][pkg.slot_atom])
2459                         self._slot_collision_info[slot_key] = slot_nodes
2460                 slot_nodes.add(pkg)
2461
2462         def _add_pkg_deps(self, pkg, allow_unsatisfied=False):
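                     """
                     Collect the DEPEND, RDEPEND and PDEPEND strings for pkg, prune
                     them according to the current options (--buildpkgonly,
                     --with-bdeps, --root-deps, removal actions), and feed each
                     remaining dep string to _add_pkg_dep_string(). Returns 1 on
                     success and 0 on failure.
                     """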
2463
2464                 mytype = pkg.type_name
2465                 myroot = pkg.root
2466                 mykey = pkg.cpv
2467                 metadata = pkg.metadata
2468                 myuse = pkg.use.enabled
2469                 jbigkey = pkg
2470                 depth = pkg.depth + 1
2471                 removal_action = "remove" in self.myparams
2472
2473                 edepend={}
2474                 depkeys = ["DEPEND","RDEPEND","PDEPEND"]
2475                 for k in depkeys:
2476                         edepend[k] = metadata[k]
2477
2478                 if not pkg.built and \
2479                         "--buildpkgonly" in self.myopts and \
2480                         "deep" not in self.myparams and \
2481                         "empty" not in self.myparams:
2482                         edepend["RDEPEND"] = ""
2483                         edepend["PDEPEND"] = ""
2484                 bdeps_optional = False
2485
2486                 if pkg.built and not removal_action:
2487                         if self.myopts.get("--with-bdeps", "n") == "y":
2488                                 # Pull in build time deps as requested, but mark them as
2489                                 # "optional" since they are not strictly required. This allows
2490                                 # more freedom in the merge order calculation for solving
2491                                 # circular dependencies. Don't convert to PDEPEND since that
2492                                 # could make --with-bdeps=y less effective if it is used to
2493                                 # adjust merge order to prevent built_with_use() calls from
2494                                 # failing.
2495                                 bdeps_optional = True
2496                         else:
2497                                 # Built packages do not have build-time dependencies.
2498                                 edepend["DEPEND"] = ""
2499
2500                 if removal_action and self.myopts.get("--with-bdeps", "y") == "n":
2501                         edepend["DEPEND"] = ""
2502
2503                 bdeps_root = "/"
2504                 root_deps = self.myopts.get("--root-deps")
2505                 if root_deps is not None:
2506                         if root_deps is True:
2507                                 bdeps_root = myroot
2508                         elif root_deps == "rdeps":
2509                                 edepend["DEPEND"] = ""
2510
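                     # Map each dependency type to the root it applies to and the
                     # priority it is scheduled with: DEPEND applies to bdeps_root
                     # (build-time deps), while RDEPEND and PDEPEND apply to the
                     # package's own root with runtime priorities.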
2511                 deps = (
2512                         (bdeps_root, edepend["DEPEND"],
2513                                 self._priority(buildtime=(not bdeps_optional),
2514                                 optional=bdeps_optional)),
2515                         (myroot, edepend["RDEPEND"], self._priority(runtime=True)),
2516                         (myroot, edepend["PDEPEND"], self._priority(runtime_post=True))
2517                 )
2518
2519                 debug = "--debug" in self.myopts
2520                 strict = mytype != "installed"
2521                 try:
2522                         if not strict:
2523                                 portage.dep._dep_check_strict = False
2524
2525                         for dep_root, dep_string, dep_priority in deps:
2526                                 if not dep_string:
2527                                         continue
2528                                 if debug:
2529                                         print
2530                                         print "Parent:   ", jbigkey
2531                                         print "Depstring:", dep_string
2532                                         print "Priority:", dep_priority
2533
2534                                 try:
2535
2536                                         dep_string = portage.dep.paren_normalize(
2537                                                 portage.dep.use_reduce(
2538                                                 portage.dep.paren_reduce(dep_string),
2539                                                 uselist=pkg.use.enabled))
2540
2541                                         dep_string = list(self._queue_disjunctive_deps(
2542                                                 pkg, dep_root, dep_priority, dep_string))
2543
2544                                 except portage.exception.InvalidDependString, e:
2545                                         if pkg.installed:
2546                                                 del e
2547                                                 continue
2548                                         show_invalid_depstring_notice(pkg, dep_string, str(e))
2549                                         return 0
2550
2551                                 if not dep_string:
2552                                         continue
2553
2554                                 dep_string = portage.dep.paren_enclose(dep_string)
2555
2556                                 if not self._add_pkg_dep_string(
2557                                         pkg, dep_root, dep_priority, dep_string,
2558                                         allow_unsatisfied):
2559                                         return 0
2560
2561                 except portage.exception.AmbiguousPackageName, e:
2562                         pkgs = e.args[0]
2563                         portage.writemsg("\n\n!!! An atom in the dependencies " + \
2564                                 "is not fully-qualified. Multiple matches:\n\n", noiselevel=-1)
2565                         for cpv in pkgs:
2566                                 portage.writemsg("    %s\n" % cpv, noiselevel=-1)
2567                         portage.writemsg("\n", noiselevel=-1)
2568                         if mytype == "binary":
2569                                 portage.writemsg(
2570                                         "!!! This binary package cannot be installed: '%s'\n" % \
2571                                         mykey, noiselevel=-1)
2572                         elif mytype == "ebuild":
2573                                 portdb = self.roots[myroot].trees["porttree"].dbapi
2574                                 myebuild, mylocation = portdb.findname2(mykey)
2575                                 portage.writemsg("!!! This ebuild cannot be installed: " + \
2576                                         "'%s'\n" % myebuild, noiselevel=-1)
2577                         portage.writemsg("!!! Please notify the package maintainer " + \
2578                                 "that atoms must be fully-qualified.\n", noiselevel=-1)
2579                         return 0
2580                 finally:
2581                         portage.dep._dep_check_strict = True
2582                 return 1
2583
2584         def _add_pkg_dep_string(self, pkg, dep_root, dep_priority, dep_string,
2585                 allow_unsatisfied):
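                     """
                     Run a single dependency string through _select_atoms() and add
                     a Dependency to the graph for each selected atom. The priority
                     is marked satisfied when an installed package already matches
                     the atom. Returns 1 on success and 0 on failure.
                     """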
2586                 depth = pkg.depth + 1
2587                 debug = "--debug" in self.myopts
2588                 strict = pkg.type_name != "installed"
2589
2590                 if debug:
2591                         print
2592                         print "Parent:   ", pkg
2593                         print "Depstring:", dep_string
2594                         print "Priority:", dep_priority
2595
2596                 try:
2597                         selected_atoms = self._select_atoms(dep_root,
2598                                 dep_string, myuse=pkg.use.enabled, parent=pkg,
2599                                 strict=strict, priority=dep_priority)
2600                 except portage.exception.InvalidDependString, e:
2601                         show_invalid_depstring_notice(pkg, dep_string, str(e))
2602                         del e
2603                         if pkg.installed:
2604                                 return 1
2605                         return 0
2606
2607                 if debug:
2608                         print "Candidates:", selected_atoms
2609
2610                 vardb = self.roots[dep_root].trees["vartree"].dbapi
2611
2612                 for atom in selected_atoms:
2613                         try:
2614
2615                                 atom = portage.dep.Atom(atom)
2616
2617                                 mypriority = dep_priority.copy()
2618                                 if not atom.blocker and vardb.match(atom):
2619                                         mypriority.satisfied = True
2620
2621                                 if not self._add_dep(Dependency(atom=atom,
2622                                         blocker=atom.blocker, depth=depth, parent=pkg,
2623                                         priority=mypriority, root=dep_root),
2624                                         allow_unsatisfied=allow_unsatisfied):
2625                                         return 0
2626
2627                         except portage.exception.InvalidAtom, e:
2628                                 show_invalid_depstring_notice(
2629                                         pkg, dep_string, str(e))
2630                                 del e
2631                                 if not pkg.installed:
2632                                         return 0
2633
2634                 if debug:
2635                         print "Exiting...", pkg
2636
2637                 return 1
2638
2639         def _queue_disjunctive_deps(self, pkg, dep_root, dep_priority, dep_struct):
2640                 """
2641                 Queue disjunctive (virtual and ||) deps in self._dep_disjunctive_stack.
2642                 Yields non-disjunctive deps. Raises InvalidDependString when 
2643                 necessary.
2644                 """
2645                 i = 0
2646                 while i < len(dep_struct):
2647                         x = dep_struct[i]
2648                         if isinstance(x, list):
2649                                 for y in self._queue_disjunctive_deps(
2650                                         pkg, dep_root, dep_priority, x):
2651                                         yield y
2652                         elif x == "||":
2653                                 self._queue_disjunction(pkg, dep_root, dep_priority,
2654                                         [ x, dep_struct[ i + 1 ] ] )
2655                                 i += 1
2656                         else:
2657                                 try:
2658                                         x = portage.dep.Atom(x)
2659                                 except portage.exception.InvalidAtom:
2660                                         if not pkg.installed:
2661                                                 raise portage.exception.InvalidDependString(
2662                                                         "invalid atom: '%s'" % x)
2663                                 else:
2664                                         # Note: Eventually this will check for PROPERTIES=virtual
2665                                         # or whatever other metadata gets implemented for this
2666                                         # purpose.
2667                                         if x.cp.startswith('virtual/'):
2668                                                 self._queue_disjunction( pkg, dep_root,
2669                                                         dep_priority, [ str(x) ] )
2670                                         else:
2671                                                 yield str(x)
2672                         i += 1
2673
2674         def _queue_disjunction(self, pkg, dep_root, dep_priority, dep_struct):
2675                 self._dep_disjunctive_stack.append(
2676                         (pkg, dep_root, dep_priority, dep_struct))
2677
2678         def _pop_disjunction(self, allow_unsatisfied):
2679                 """
2680                 Pop one disjunctive dep from self._dep_disjunctive_stack, and use it to
2681                 populate self._dep_stack.
2682                 """
2683                 pkg, dep_root, dep_priority, dep_struct = \
2684                         self._dep_disjunctive_stack.pop()
2685                 dep_string = portage.dep.paren_enclose(dep_struct)
2686                 if not self._add_pkg_dep_string(
2687                         pkg, dep_root, dep_priority, dep_string, allow_unsatisfied):
2688                         return 0
2689                 return 1
2690
2691         def _priority(self, **kwargs):
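                     """
                     Construct a dependency priority from keyword arguments, using
                     UnmergeDepPriority when operating in removal mode and
                     DepPriority otherwise.
                     """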
2692                 if "remove" in self.myparams:
2693                         priority_constructor = UnmergeDepPriority
2694                 else:
2695                         priority_constructor = DepPriority
2696                 return priority_constructor(**kwargs)
2697
2698         def _dep_expand(self, root_config, atom_without_category):
2699                 """
2700                 @param root_config: a root config instance
2701                 @type root_config: RootConfig
2702                 @param atom_without_category: an atom without a category component
2703                 @type atom_without_category: String
2704                 @rtype: list
2705                 @returns: a list of atoms containing categories (possibly empty)
2706                 """
2707                 null_cp = portage.dep_getkey(insert_category_into_atom(
2708                         atom_without_category, "null"))
2709                 cat, atom_pn = portage.catsplit(null_cp)
2710
2711                 dbs = self._filtered_trees[root_config.root]["dbs"]
2712                 categories = set()
2713                 for db, pkg_type, built, installed, db_keys in dbs:
2714                         for cat in db.categories:
2715                                 if db.cp_list("%s/%s" % (cat, atom_pn)):
2716                                         categories.add(cat)
2717
2718                 deps = []
2719                 for cat in categories:
2720                         deps.append(insert_category_into_atom(
2721                                 atom_without_category, cat))
2722                 return deps
2723
2724         def _have_new_virt(self, root, atom_cp):
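                     """
                     Return True if any database configured for the given root
                     contains a package with the category/package name atom_cp,
                     meaning a new-style package (typically a virtual) can satisfy
                     it directly.
                     """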
2725                 ret = False
2726                 for db, pkg_type, built, installed, db_keys in \
2727                         self._filtered_trees[root]["dbs"]:
2728                         if db.cp_list(atom_cp):
2729                                 ret = True
2730                                 break
2731                 return ret
2732
2733         def _iter_atoms_for_pkg(self, pkg):
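                     """
                     Yield (arg, atom) pairs for each argument atom that matches pkg.
                     Atoms are skipped when a visible package from a higher slot of
                     the same category/package would satisfy them instead, or when
                     the atom refers to a different category/package for which a
                     new-style package exists.
                     """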
2734                 # TODO: add multiple $ROOT support
2735                 if pkg.root != self.target_root:
2736                         return
2737                 atom_arg_map = self._atom_arg_map
2738                 root_config = self.roots[pkg.root]
2739                 for atom in self._set_atoms.iterAtomsForPackage(pkg):
2740                         atom_cp = portage.dep_getkey(atom)
2741                         if atom_cp != pkg.cp and \
2742                                 self._have_new_virt(pkg.root, atom_cp):
2743                                 continue
2744                         visible_pkgs = root_config.visible_pkgs.match_pkgs(atom)
2745                         visible_pkgs.reverse() # descending order
2746                         higher_slot = None
2747                         for visible_pkg in visible_pkgs:
2748                                 if visible_pkg.cp != atom_cp:
2749                                         continue
2750                                 if pkg >= visible_pkg:
2751                                         # This is descending order, and we're not
2752                                         # interested in any versions <= the given pkg.
2753                                         break
2754                                 if pkg.slot_atom != visible_pkg.slot_atom:
2755                                         higher_slot = visible_pkg
2756                                         break
2757                         if higher_slot is not None:
2758                                 continue
2759                         for arg in atom_arg_map[(atom, pkg.root)]:
2760                                 if isinstance(arg, PackageArg) and \
2761                                         arg.package != pkg:
2762                                         continue
2763                                 yield arg, atom
2764
2765         def select_files(self, myfiles):
2766                 """Given a list of .tbz2s, .ebuilds, sets, and deps, create the
2767                 appropriate depgraph and return a favorite list."""
2768                 debug = "--debug" in self.myopts
2769                 root_config = self.roots[self.target_root]
2770                 sets = root_config.sets
2771                 getSetAtoms = root_config.setconfig.getSetAtoms
2772                 myfavorites=[]
2773                 myroot = self.target_root
2774                 dbs = self._filtered_trees[myroot]["dbs"]
2775                 vardb = self.trees[myroot]["vartree"].dbapi
2776                 real_vardb = self._trees_orig[myroot]["vartree"].dbapi
2777                 portdb = self.trees[myroot]["porttree"].dbapi
2778                 bindb = self.trees[myroot]["bintree"].dbapi
2779                 pkgsettings = self.pkgsettings[myroot]
2780                 args = []
2781                 onlydeps = "--onlydeps" in self.myopts
2782                 lookup_owners = []
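                     # Classify each argument: binary packages (.tbz2), ebuild paths,
                     # absolute filesystem paths (queued for owner lookup below),
                     # package sets, and plain atoms.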
2783                 for x in myfiles:
2784                         ext = os.path.splitext(x)[1]
2785                         if ext==".tbz2":
2786                                 if not os.path.exists(x):
2787                                         if os.path.exists(
2788                                                 os.path.join(pkgsettings["PKGDIR"], "All", x)):
2789                                                 x = os.path.join(pkgsettings["PKGDIR"], "All", x)
2790                                         elif os.path.exists(
2791                                                 os.path.join(pkgsettings["PKGDIR"], x)):
2792                                                 x = os.path.join(pkgsettings["PKGDIR"], x)
2793                                         else:
2794                                                 print "\n\n!!! Binary package '"+str(x)+"' does not exist."
2795                                                 print "!!! Please ensure the tbz2 exists as specified.\n"
2796                                                 return 0, myfavorites
2797                                 mytbz2=portage.xpak.tbz2(x)
2798                                 mykey=mytbz2.getelements("CATEGORY")[0]+"/"+os.path.splitext(os.path.basename(x))[0]
2799                                 if os.path.realpath(x) != \
2800                                         os.path.realpath(self.trees[myroot]["bintree"].getname(mykey)):
2801                                         print colorize("BAD", "\n*** You need to adjust PKGDIR to emerge this package.\n")
2802                                         return 0, myfavorites
2803                                 db_keys = list(bindb._aux_cache_keys)
2804                                 metadata = izip(db_keys, bindb.aux_get(mykey, db_keys))
2805                                 pkg = Package(type_name="binary", root_config=root_config,
2806                                         cpv=mykey, built=True, metadata=metadata,
2807                                         onlydeps=onlydeps)
2808                                 self._pkg_cache[pkg] = pkg
2809                                 args.append(PackageArg(arg=x, package=pkg,
2810                                         root_config=root_config))
2811                         elif ext==".ebuild":
2812                                 ebuild_path = portage.util.normalize_path(os.path.abspath(x))
2813                                 pkgdir = os.path.dirname(ebuild_path)
2814                                 tree_root = os.path.dirname(os.path.dirname(pkgdir))
2815                                 cp = pkgdir[len(tree_root)+1:]
2816                                 e = portage.exception.PackageNotFound(
2817                                         ("%s is not in a valid portage tree " + \
2818                                         "hierarchy or does not exist") % x)
2819                                 if not portage.isvalidatom(cp):
2820                                         raise e
2821                                 cat = portage.catsplit(cp)[0]
2822                                 mykey = cat + "/" + os.path.basename(ebuild_path[:-7])
2823                                 if not portage.isvalidatom("="+mykey):
2824                                         raise e
2825                                 ebuild_path = portdb.findname(mykey)
2826                                 if ebuild_path:
2827                                         if ebuild_path != os.path.join(os.path.realpath(tree_root),
2828                                                 cp, os.path.basename(ebuild_path)):
2829                                                 print colorize("BAD", "\n*** You need to adjust PORTDIR or PORTDIR_OVERLAY to emerge this package.\n")
2830                                                 return 0, myfavorites
2831                                         if mykey not in portdb.xmatch(
2832                                                 "match-visible", portage.dep_getkey(mykey)):
2833                                                 print colorize("BAD", "\n*** You are emerging a masked package. It is MUCH better to use")
2834                                                 print colorize("BAD", "*** /etc/portage/package.* to accomplish this. See portage(5) man")
2835                                                 print colorize("BAD", "*** page for details.")
2836                                                 countdown(int(self.settings["EMERGE_WARNING_DELAY"]),
2837                                                         "Continuing...")
2838                                 else:
2839                                         raise portage.exception.PackageNotFound(
2840                                                 "%s is not in a valid portage tree hierarchy or does not exist" % x)
2841                                 db_keys = list(portdb._aux_cache_keys)
2842                                 metadata = izip(db_keys, portdb.aux_get(mykey, db_keys))
2843                                 pkg = Package(type_name="ebuild", root_config=root_config,
2844                                         cpv=mykey, metadata=metadata, onlydeps=onlydeps)
2845                                 pkgsettings.setcpv(pkg)
2846                                 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
2847                                 pkg.metadata['CHOST'] = pkgsettings.get('CHOST', '')
2848                                 self._pkg_cache[pkg] = pkg
2849                                 args.append(PackageArg(arg=x, package=pkg,
2850                                         root_config=root_config))
2851                         elif x.startswith(os.path.sep):
2852                                 if not x.startswith(myroot):
2853                                         portage.writemsg(("\n\n!!! '%s' does not start with" + \
2854                                                 " $ROOT.\n") % x, noiselevel=-1)
2855                                         return 0, []
2856                                 # Queue these up since it's most efficient to handle
2857                                 # multiple files in a single iter_owners() call.
2858                                 lookup_owners.append(x)
2859                         else:
2860                                 if x in ("system", "world"):
2861                                         x = SETPREFIX + x
2862                                 if x.startswith(SETPREFIX):
2863                                         s = x[len(SETPREFIX):]
2864                                         if s not in sets:
2865                                                 raise portage.exception.PackageSetNotFound(s)
2866                                         if s in self._sets:
2867                                                 continue
2868                                         # Recursively expand sets so that containment tests in
2869                                         # self._get_parent_sets() properly match atoms in nested
2870                                         # sets (like if world contains system).
2871                                         expanded_set = InternalPackageSet(
2872                                                 initial_atoms=getSetAtoms(s))
2873                                         self._sets[s] = expanded_set
2874                                         args.append(SetArg(arg=x, set=expanded_set,
2875                                                 root_config=root_config))
2876                                         continue
2877                                 if not is_valid_package_atom(x):
2878                                         portage.writemsg("\n\n!!! '%s' is not a valid package atom.\n" % x,
2879                                                 noiselevel=-1)
2880                                         portage.writemsg("!!! Please check ebuild(5) for full details.\n")
2881                                         portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
2882                                         return (0,[])
2883                                 # Don't expand categories or old-style virtuals here unless
2884                                 # necessary. Expansion of old-style virtuals here causes at
2885                                 # least the following problems:
2886                                 #   1) It's more difficult to determine which set(s) an atom
2887                                 #      came from, if any.
2888                                 #   2) It takes away freedom from the resolver to choose other
2889                                 #      possible expansions when necessary.
2890                                 if "/" in x:
2891                                         args.append(AtomArg(arg=x, atom=x,
2892                                                 root_config=root_config))
2893                                         continue
2894                                 expanded_atoms = self._dep_expand(root_config, x)
2895                                 installed_cp_set = set()
2896                                 for atom in expanded_atoms:
2897                                         atom_cp = portage.dep_getkey(atom)
2898                                         if vardb.cp_list(atom_cp):
2899                                                 installed_cp_set.add(atom_cp)
2900
2901                                 if len(installed_cp_set) > 1:
2902                                         non_virtual_cps = set()
2903                                         for atom_cp in installed_cp_set:
2904                                                 if not atom_cp.startswith("virtual/"):
2905                                                         non_virtual_cps.add(atom_cp)
2906                                         if len(non_virtual_cps) == 1:
2907                                                 installed_cp_set = non_virtual_cps
2908
2909                                 if len(expanded_atoms) > 1 and len(installed_cp_set) == 1:
2910                                         installed_cp = iter(installed_cp_set).next()
2911                                         expanded_atoms = [atom for atom in expanded_atoms \
2912                                                 if portage.dep_getkey(atom) == installed_cp]
2913
2914                                 if len(expanded_atoms) > 1:
2915                                         print
2916                                         print
2917                                         ambiguous_package_name(x, expanded_atoms, root_config,
2918                                                 self.spinner, self.myopts)
2919                                         return False, myfavorites
2920                                 if expanded_atoms:
2921                                         atom = expanded_atoms[0]
2922                                 else:
2923                                         null_atom = insert_category_into_atom(x, "null")
2924                                         null_cp = portage.dep_getkey(null_atom)
2925                                         cat, atom_pn = portage.catsplit(null_cp)
2926                                         virts_p = root_config.settings.get_virts_p().get(atom_pn)
2927                                         if virts_p:
2928                                                 # Allow the depgraph to choose which virtual.
2929                                                 atom = insert_category_into_atom(x, "virtual")
2930                                         else:
2931                                                 atom = insert_category_into_atom(x, "null")
2932
2933                                 args.append(AtomArg(arg=x, atom=atom,
2934                                         root_config=root_config))
2935
2936                 if lookup_owners:
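                             # Resolve the queued paths to the installed packages that
                             # own them, then turn each owner into an atom
                             # (slot-qualified when SLOT metadata is available).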
2937                         relative_paths = []
2938                         search_for_multiple = False
2939                         if len(lookup_owners) > 1:
2940                                 search_for_multiple = True
2941
2942                         for x in lookup_owners:
2943                                 if not search_for_multiple and os.path.isdir(x):
2944                                         search_for_multiple = True
2945                                 relative_paths.append(x[len(myroot):])
2946
2947                         owners = set()
2948                         for pkg, relative_path in \
2949                                 real_vardb._owners.iter_owners(relative_paths):
2950                                 owners.add(pkg.mycpv)
2951                                 if not search_for_multiple:
2952                                         break
2953
2954                         if not owners:
2955                                 portage.writemsg(("\n\n!!! '%s' is not claimed " + \
2956                                         "by any package.\n") % lookup_owners[0], noiselevel=-1)
2957                                 return 0, []
2958
2959                         for cpv in owners:
2960                                 slot = vardb.aux_get(cpv, ["SLOT"])[0]
2961                                 if not slot:
2962                                         # portage now masks packages with missing slot, but it's
2963                                         # possible that one was installed by an older version
2964                                         atom = portage.cpv_getkey(cpv)
2965                                 else:
2966                                         atom = "%s:%s" % (portage.cpv_getkey(cpv), slot)
2967                                 args.append(AtomArg(arg=atom, atom=atom,
2968                                         root_config=root_config))
2969
2970                 if "--update" in self.myopts:
2971                         # In some cases, the greedy slots behavior can pull in a slot that
2972                         # the user would want to uninstall due to it being blocked by a
2973                         # newer version in a different slot. Therefore, it's necessary to
2974                         # detect and discard any that should be uninstalled. Each time
2975                         # that arguments are updated, package selections are repeated in
2976                         # order to ensure consistency with the current arguments:
2977                         #
2978                         #  1) Initialize args
2979                         #  2) Select packages and generate initial greedy atoms
2980                         #  3) Update args with greedy atoms
2981                         #  4) Select packages and generate greedy atoms again, while
2982                         #     accounting for any blockers between selected packages
2983                         #  5) Update args with revised greedy atoms
2984
2985                         self._set_args(args)
2986                         greedy_args = []
2987                         for arg in args:
2988                                 greedy_args.append(arg)
2989                                 if not isinstance(arg, AtomArg):
2990                                         continue
2991                                 for atom in self._greedy_slots(arg.root_config, arg.atom):
2992                                         greedy_args.append(
2993                                                 AtomArg(arg=arg.arg, atom=atom,
2994                                                         root_config=arg.root_config))
2995
2996                         self._set_args(greedy_args)
2997                         del greedy_args
2998
2999                         # Revise greedy atoms, accounting for any blockers
3000                         # between selected packages.
3001                         revised_greedy_args = []
3002                         for arg in args:
3003                                 revised_greedy_args.append(arg)
3004                                 if not isinstance(arg, AtomArg):
3005                                         continue
3006                                 for atom in self._greedy_slots(arg.root_config, arg.atom,
3007                                         blocker_lookahead=True):
3008                                         revised_greedy_args.append(
3009                                                 AtomArg(arg=arg.arg, atom=atom,
3010                                                         root_config=arg.root_config))
3011                         args = revised_greedy_args
3012                         del revised_greedy_args
3013
3014                 self._set_args(args)
3015
3016                 myfavorites = set(myfavorites)
3017                 for arg in args:
3018                         if isinstance(arg, (AtomArg, PackageArg)):
3019                                 myfavorites.add(arg.atom)
3020                         elif isinstance(arg, SetArg):
3021                                 myfavorites.add(arg.arg)
3022                 myfavorites = list(myfavorites)
3023
3024                 pprovideddict = pkgsettings.pprovideddict
3025                 if debug:
3026                         portage.writemsg("\n", noiselevel=-1)
3027                 # Order needs to be preserved since a feature of --nodeps
3028                 # is to allow the user to force a specific merge order.
3029                 args.reverse()
3030                 while args:
3031                         arg = args.pop()
3032                         for atom in arg.set:
3033                                 self.spinner.update()
3034                                 dep = Dependency(atom=atom, onlydeps=onlydeps,
3035                                         root=myroot, parent=arg)
3036                                 atom_cp = portage.dep_getkey(atom)
3037                                 try:
3038                                         pprovided = pprovideddict.get(portage.dep_getkey(atom))
3039                                         if pprovided and portage.match_from_list(atom, pprovided):
3040                                                 # A provided package has been specified on the command line.
3041                                                 self._pprovided_args.append((arg, atom))
3042                                                 continue
3043                                         if isinstance(arg, PackageArg):
3044                                                 if not self._add_pkg(arg.package, dep) or \
3045                                                         not self._create_graph():
3046                                                         sys.stderr.write(("\n\n!!! Problem resolving " + \
3047                                                                 "dependencies for %s\n") % arg.arg)
3048                                                         return 0, myfavorites
3049                                                 continue
3050                                         if debug:
3051                                                 portage.writemsg("      Arg: %s\n     Atom: %s\n" % \
3052                                                         (arg, atom), noiselevel=-1)
3053                                         pkg, existing_node = self._select_package(
3054                                                 myroot, atom, onlydeps=onlydeps)
3055                                         if not pkg:
3056                                                 if not (isinstance(arg, SetArg) and \
3057                                                         arg.name in ("system", "world")):
3058                                                         self._unsatisfied_deps_for_display.append(
3059                                                                 ((myroot, atom), {}))
3060                                                         return 0, myfavorites
3061                                                 self._missing_args.append((arg, atom))
3062                                                 continue
3063                                         if atom_cp != pkg.cp:
3064                                                 # For old-style virtuals, we need to repeat the
3065                                                 # package.provided check against the selected package.
3066                                                 expanded_atom = atom.replace(atom_cp, pkg.cp)
3067                                                 pprovided = pprovideddict.get(pkg.cp)
3068                                                 if pprovided and \
3069                                                         portage.match_from_list(expanded_atom, pprovided):
3070                                                         # A provided package has been
3071                                                         # specified on the command line.
3072                                                         self._pprovided_args.append((arg, atom))
3073                                                         continue
3074                                         if pkg.installed and "selective" not in self.myparams:
3075                                                 self._unsatisfied_deps_for_display.append(
3076                                                         ((myroot, atom), {}))
3077                                                 # Previous behavior was to bail out in this case, but
3078                                                 # since the dep is satisfied by the installed package,
3079                                                 # it's more friendly to continue building the graph
3080                                                 # and just show a warning message. Therefore, only bail
3081                                                 # out here if the atom is not from either the system or
3082                                                 # world set.
3083                                                 if not (isinstance(arg, SetArg) and \
3084                                                         arg.name in ("system", "world")):
3085                                                         return 0, myfavorites
3086
3087                                         # Add the selected package to the graph as soon as possible
3088                                         # so that later dep_check() calls can use it as feedback
3089                                         # for making more consistent atom selections.
3090                                         if not self._add_pkg(pkg, dep):
3091                                                 if isinstance(arg, SetArg):
3092                                                         sys.stderr.write(("\n\n!!! Problem resolving " + \
3093                                                                 "dependencies for %s from %s\n") % \
3094                                                                 (atom, arg.arg))
3095                                                 else:
3096                                                         sys.stderr.write(("\n\n!!! Problem resolving " + \
3097                                                                 "dependencies for %s\n") % atom)
3098                                                 return 0, myfavorites
3099
3100                                 except portage.exception.MissingSignature, e:
3101                                         portage.writemsg("\n\n!!! A missing gpg signature is preventing portage from calculating the\n")
3102                                         portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
3103                                         portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
3104                                         portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
3105                                         portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
3106                                         return 0, myfavorites
3107                                 except portage.exception.InvalidSignature, e:
3108                                         portage.writemsg("\n\n!!! An invalid gpg signature is preventing portage from calculating the\n")
3109                                         portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
3110                                         portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
3111                                         portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
3112                                         portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
3113                                         return 0, myfavorites
3114                                 except SystemExit, e:
3115                                         raise # re-raise so that the process can exit
3116                                 except Exception, e:
3117                                         print >> sys.stderr, "\n\n!!! Problem in '%s' dependencies." % atom
3118                                         print >> sys.stderr, "!!!", str(e), getattr(e, "__module__", None)
3119                                         raise
3120
3121                 # Now that the root packages have been added to the graph,
3122                 # process the dependencies.
3123                 if not self._create_graph():
3124                         return 0, myfavorites
3125
3126                 missing=0
3127                 if "--usepkgonly" in self.myopts:
3128                         for xs in self.digraph.all_nodes():
3129                                 if not isinstance(xs, Package):
3130                                         continue
3131                                 if len(xs) >= 4 and xs[0] != "binary" and xs[3] == "merge":
3132                                         if missing == 0:
3133                                                 print
3134                                         missing += 1
3135                                         print "Missing binary for:",xs[2]
3136
3137                 try:
3138                         self.altlist()
3139                 except self._unknown_internal_error:
3140                         return False, myfavorites
3141
3142                 # The return value is True unless binaries are missing.
3143                 return (not missing, myfavorites)
3144
3145         def _set_args(self, args):
3146                 """
3147                 Create the "args" package set from atoms and packages given as
3148                 arguments. This method can be called multiple times if necessary.
3149                 The package selection cache is automatically invalidated, since
3150                 arguments influence package selections.
3151                 """
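                # Illustrative sketch of the resulting structures (hypothetical
                # atom): for an AtomArg like ">=dev-lang/python-2.5", the
                # "args" set receives that atom, while _atom_arg_map maps the
                # (atom, root) key to the argument instance(s) that supplied
                # it. SetArg arguments contribute to _atom_arg_map but not to
                # the "args" set.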
3152                 args_set = self._sets["args"]
3153                 args_set.clear()
3154                 for arg in args:
3155                         if not isinstance(arg, (AtomArg, PackageArg)):
3156                                 continue
3157                         atom = arg.atom
3158                         if atom in args_set:
3159                                 continue
3160                         args_set.add(atom)
3161
3162                 self._set_atoms.clear()
3163                 self._set_atoms.update(chain(*self._sets.itervalues()))
3164                 atom_arg_map = self._atom_arg_map
3165                 atom_arg_map.clear()
3166                 for arg in args:
3167                         for atom in arg.set:
3168                                 atom_key = (atom, arg.root_config.root)
3169                                 refs = atom_arg_map.get(atom_key)
3170                                 if refs is None:
3171                                         refs = []
3172                                         atom_arg_map[atom_key] = refs
3173                                 if arg not in refs:
3174                                         refs.append(arg)
3175
3176                 # Invalidate the package selection cache, since
3177                 # arguments influence package selections.
3178                 self._highest_pkg_cache.clear()
3179                 for trees in self._filtered_trees.itervalues():
3180                         trees["porttree"].dbapi._clear_cache()
3181
3182         def _greedy_slots(self, root_config, atom, blocker_lookahead=False):
3183                 """
3184                 Return a list of slot atoms corresponding to installed slots that
3185                 differ from the slot of the highest visible match. When
3186                 blocker_lookahead is True, slot atoms that would trigger a blocker
3187                 conflict are automatically discarded, potentially allowing automatic
3188                 uninstallation of older slots when appropriate.
3189                 """
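                # Illustrative sketch (hypothetical package): if sys-devel/foo
                # is installed in slots "1.4" and "1.5" and the highest
                # visible match for the atom is the "1.5" slot, this returns
                # [Atom("sys-devel/foo:1.4")], unless blocker_lookahead
                # discards it due to a conflict with the "1.5" package.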
3190                 highest_pkg, in_graph = self._select_package(root_config.root, atom)
3191                 if highest_pkg is None:
3192                         return []
3193                 vardb = root_config.trees["vartree"].dbapi
3194                 slots = set()
3195                 for cpv in vardb.match(atom):
3196                         # don't mix new virtuals with old virtuals
3197                         if portage.cpv_getkey(cpv) == highest_pkg.cp:
3198                                 slots.add(vardb.aux_get(cpv, ["SLOT"])[0])
3199
3200                 slots.add(highest_pkg.metadata["SLOT"])
3201                 if len(slots) == 1:
3202                         return []
3203                 greedy_pkgs = []
3204                 slots.remove(highest_pkg.metadata["SLOT"])
3205                 while slots:
3206                         slot = slots.pop()
3207                         slot_atom = portage.dep.Atom("%s:%s" % (highest_pkg.cp, slot))
3208                         pkg, in_graph = self._select_package(root_config.root, slot_atom)
3209                         if pkg is not None and \
3210                                 pkg.cp == highest_pkg.cp and pkg < highest_pkg:
3211                                 greedy_pkgs.append(pkg)
3212                 if not greedy_pkgs:
3213                         return []
3214                 if not blocker_lookahead:
3215                         return [pkg.slot_atom for pkg in greedy_pkgs]
3216
3217                 blockers = {}
3218                 blocker_dep_keys = ["DEPEND", "PDEPEND", "RDEPEND"]
3219                 for pkg in greedy_pkgs + [highest_pkg]:
3220                         dep_str = " ".join(pkg.metadata[k] for k in blocker_dep_keys)
3221                         try:
3222                                 atoms = self._select_atoms(
3223                                         pkg.root, dep_str, pkg.use.enabled,
3224                                         parent=pkg, strict=True)
3225                         except portage.exception.InvalidDependString:
3226                                 continue
3227                         blocker_atoms = (x for x in atoms if x.blocker)
3228                         blockers[pkg] = InternalPackageSet(initial_atoms=blocker_atoms)
3229
3230                 if highest_pkg not in blockers:
3231                         return []
3232
3233                 # filter packages with invalid deps
3234                 greedy_pkgs = [pkg for pkg in greedy_pkgs if pkg in blockers]
3235
3236                 # filter packages that conflict with highest_pkg
3237                 greedy_pkgs = [pkg for pkg in greedy_pkgs if not \
3238                         (blockers[highest_pkg].findAtomForPackage(pkg) or \
3239                         blockers[pkg].findAtomForPackage(highest_pkg))]
3240
3241                 if not greedy_pkgs:
3242                         return []
3243
3244                 # If two packages conflict, discard the lower version.
3245                 discard_pkgs = set()
3246                 greedy_pkgs.sort(reverse=True)
3247                 for i in xrange(len(greedy_pkgs) - 1):
3248                         pkg1 = greedy_pkgs[i]
3249                         if pkg1 in discard_pkgs:
3250                                 continue
3251                         for j in xrange(i + 1, len(greedy_pkgs)):
3252                                 pkg2 = greedy_pkgs[j]
3253                                 if pkg2 in discard_pkgs:
3254                                         continue
3255                                 if blockers[pkg1].findAtomForPackage(pkg2) or \
3256                                         blockers[pkg2].findAtomForPackage(pkg1):
3257                                         # pkg1 > pkg2
3258                                         discard_pkgs.add(pkg2)
3259
3260                 return [pkg.slot_atom for pkg in greedy_pkgs \
3261                         if pkg not in discard_pkgs]
3262
3263         def _select_atoms_from_graph(self, *pargs, **kwargs):
3264                 """
3265                 Prefer atoms matching packages that have already been
3266                 added to the graph or those that are installed and have
3267                 not been scheduled for replacement.
3268                 """
3269                 kwargs["trees"] = self._graph_trees
3270                 return self._select_atoms_highest_available(*pargs, **kwargs)
3271
3272         def _select_atoms_highest_available(self, root, depstring,
3273                 myuse=None, parent=None, strict=True, trees=None, priority=None):
3274                 """This will raise InvalidDependString if necessary. If trees is
3275                 None then self._filtered_trees is used."""
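                # Note: dep_check() is expected to return a (success, value)
                # pair where value is the list of selected atoms on success
                # and an error message string otherwise, which is why
                # mycheck[1] is used for both cases below.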
3276                 pkgsettings = self.pkgsettings[root]
3277                 if trees is None:
3278                         trees = self._filtered_trees
3279                 if not getattr(priority, "buildtime", False):
3280                         # The parent should only be passed to dep_check() for buildtime
3281                         # dependencies since that's the only case when it's appropriate
3282                         # to trigger the circular dependency avoidance code which uses it.
3283                         # It's important not to trigger the same circular dependency
3284                         # avoidance code for runtime dependencies since it's not needed
3285                         # and it can promote an incorrect package choice.
3286                         parent = None
3287                 if True:
3288                         try:
3289                                 if parent is not None:
3290                                         trees[root]["parent"] = parent
3291                                 if not strict:
3292                                         portage.dep._dep_check_strict = False
3293                                 mycheck = portage.dep_check(depstring, None,
3294                                         pkgsettings, myuse=myuse,
3295                                         myroot=root, trees=trees)
3296                         finally:
3297                                 if parent is not None:
3298                                         trees[root].pop("parent")
3299                                 portage.dep._dep_check_strict = True
3300                         if not mycheck[0]:
3301                                 raise portage.exception.InvalidDependString(mycheck[1])
3302                         selected_atoms = mycheck[1]
3303                 return selected_atoms
3304
3305         def _show_unsatisfied_dep(self, root, atom, myparent=None, arg=None):
3306                 atom = portage.dep.Atom(atom)
3307                 atom_set = InternalPackageSet(initial_atoms=(atom,))
3308                 atom_without_use = atom
3309                 if atom.use:
3310                         atom_without_use = portage.dep.remove_slot(atom)
3311                         if atom.slot:
3312                                 atom_without_use += ":" + atom.slot
3313                         atom_without_use = portage.dep.Atom(atom_without_use)
3314                 xinfo = '"%s"' % atom
3315                 if arg:
3316                         xinfo='"%s"' % arg
3317                 # Discard null/ from failed cpv_expand category expansion.
3318                 xinfo = xinfo.replace("null/", "")
3319                 masked_packages = []
3320                 missing_use = []
3321                 masked_pkg_instances = set()
3322                 missing_licenses = []
3323                 have_eapi_mask = False
3324                 pkgsettings = self.pkgsettings[root]
3325                 implicit_iuse = pkgsettings._get_implicit_iuse()
3326                 root_config = self.roots[root]
3327                 portdb = self.roots[root].trees["porttree"].dbapi
3328                 dbs = self._filtered_trees[root]["dbs"]
3329                 for db, pkg_type, built, installed, db_keys in dbs:
3330                         if installed:
3331                                 continue
3332                         match = db.match
3333                         if hasattr(db, "xmatch"):
3334                                 cpv_list = db.xmatch("match-all", atom_without_use)
3335                         else:
3336                                 cpv_list = db.match(atom_without_use)
3337                         # descending order
3338                         cpv_list.reverse()
3339                         for cpv in cpv_list:
3340                                 metadata, mreasons  = get_mask_info(root_config, cpv,
3341                                         pkgsettings, db, pkg_type, built, installed, db_keys)
3342                                 if metadata is not None:
3343                                         pkg = Package(built=built, cpv=cpv,
3344                                                 installed=installed, metadata=metadata,
3345                                                 root_config=root_config)
3346                                         if pkg.cp != atom.cp:
3347                                                 # A cpv can be returned from dbapi.match() as an
3348                                                 # old-style virtual match even in cases when the
3349                                                 # package does not actually PROVIDE the virtual.
3350                                                 # Filter out any such false matches here.
3351                                                 if not atom_set.findAtomForPackage(pkg):
3352                                                         continue
3353                                         if mreasons:
3354                                                 masked_pkg_instances.add(pkg)
3355                                         if atom.use:
3356                                                 missing_use.append(pkg)
3357                                                 if not mreasons:
3358                                                         continue
3359                                 masked_packages.append(
3360                                         (root_config, pkgsettings, cpv, metadata, mreasons))
3361
3362                 missing_use_reasons = []
3363                 missing_iuse_reasons = []
3364                 for pkg in missing_use:
3365                         use = pkg.use.enabled
3366                         iuse = implicit_iuse.union(re.escape(x) for x in pkg.iuse.all)
3367                         iuse_re = re.compile("^(%s)$" % "|".join(iuse))
3368                         missing_iuse = []
3369                         for x in atom.use.required:
3370                                 if iuse_re.match(x) is None:
3371                                         missing_iuse.append(x)
3372                         mreasons = []
3373                         if missing_iuse:
3374                                 mreasons.append("Missing IUSE: %s" % " ".join(missing_iuse))
3375                                 missing_iuse_reasons.append((pkg, mreasons))
3376                         else:
3377                                 need_enable = sorted(atom.use.enabled.difference(use))
3378                                 need_disable = sorted(atom.use.disabled.intersection(use))
3379                                 if need_enable or need_disable:
3380                                         changes = []
3381                                         changes.extend(colorize("red", "+" + x) \
3382                                                 for x in need_enable)
3383                                         changes.extend(colorize("blue", "-" + x) \
3384                                                 for x in need_disable)
3385                                         mreasons.append("Change USE: %s" % " ".join(changes))
3386                                         missing_use_reasons.append((pkg, mreasons))
3387
3388                 unmasked_use_reasons = [(pkg, mreasons) for (pkg, mreasons) \
3389                         in missing_use_reasons if pkg not in masked_pkg_instances]
3390
3391                 unmasked_iuse_reasons = [(pkg, mreasons) for (pkg, mreasons) \
3392                         in missing_iuse_reasons if pkg not in masked_pkg_instances]
3393
3394                 show_missing_use = False
3395                 if unmasked_use_reasons:
3396                         # Only show the latest version.
3397                         show_missing_use = unmasked_use_reasons[:1]
3398                 elif unmasked_iuse_reasons:
3399                         if missing_use_reasons:
3400                                 # All packages with required IUSE are masked,
3401                                 # so display a normal masking message.
3402                                 pass
3403                         else:
3404                                 show_missing_use = unmasked_iuse_reasons
3405
3406                 if show_missing_use:
3407                         print "\nemerge: there are no ebuilds built with USE flags to satisfy "+green(xinfo)+"."
3408                         print "!!! One of the following packages is required to complete your request:"
3409                         for pkg, mreasons in show_missing_use:
3410                                 print "- "+pkg.cpv+" ("+", ".join(mreasons)+")"
3411
3412                 elif masked_packages:
3413                         print "\n!!! " + \
3414                                 colorize("BAD", "All ebuilds that could satisfy ") + \
3415                                 colorize("INFORM", xinfo) + \
3416                                 colorize("BAD", " have been masked.")
3417                         print "!!! One of the following masked packages is required to complete your request:"
3418                         have_eapi_mask = show_masked_packages(masked_packages)
3419                         if have_eapi_mask:
3420                                 print
3421                                 msg = ("The current version of portage supports " + \
3422                                         "EAPI '%s'. You must upgrade to a newer version" + \
3423                                         " of portage before EAPI masked packages can" + \
3424                                         " be installed.") % portage.const.EAPI
3425                                 from textwrap import wrap
3426                                 for line in wrap(msg, 75):
3427                                         print line
3428                         print
3429                         show_mask_docs()
3430                 else:
3431                         print "\nemerge: there are no ebuilds to satisfy "+green(xinfo)+"."
3432
3433                 # Show parent nodes and the argument that pulled them in.
3434                 traversed_nodes = set()
3435                 node = myparent
3436                 msg = []
3437                 while node is not None:
3438                         traversed_nodes.add(node)
3439                         msg.append('(dependency required by "%s" [%s])' % \
3440                                 (colorize('INFORM', str(node.cpv)), node.type_name))
3441                         # When traversing to parents, prefer arguments over packages
3442                         # since arguments are root nodes. Never traverse the same
3443                         # package twice, in order to prevent an infinite loop.
3444                         selected_parent = None
3445                         for parent in self.digraph.parent_nodes(node):
3446                                 if isinstance(parent, DependencyArg):
3447                                         msg.append('(dependency required by "%s" [argument])' % \
3448                                                 (colorize('INFORM', str(parent))))
3449                                         selected_parent = None
3450                                         break
3451                                 if parent not in traversed_nodes:
3452                                         selected_parent = parent
3453                         node = selected_parent
3454                 for line in msg:
3455                         print line
3456
3457                 print
3458
3459         def _select_pkg_highest_available(self, root, atom, onlydeps=False):
3460                 cache_key = (root, atom, onlydeps)
3461                 ret = self._highest_pkg_cache.get(cache_key)
3462                 if ret is not None:
3463                         pkg, existing = ret
3464                         if pkg and not existing:
3465                                 existing = self._slot_pkg_map[root].get(pkg.slot_atom)
3466                                 if existing and existing == pkg:
3467                                         # Update the cache to reflect that the
3468                                         # package has been added to the graph.
3469                                         ret = pkg, pkg
3470                                         self._highest_pkg_cache[cache_key] = ret
3471                         return ret
3472                 ret = self._select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps)
3473                 self._highest_pkg_cache[cache_key] = ret
3474                 pkg, existing = ret
3475                 if pkg is not None:
3476                         settings = pkg.root_config.settings
3477                         if visible(settings, pkg) and not (pkg.installed and \
3478                                 settings._getMissingKeywords(pkg.cpv, pkg.metadata)):
3479                                 pkg.root_config.visible_pkgs.cpv_inject(pkg)
3480                 return ret
3481
3482         def _select_pkg_highest_available_imp(self, root, atom, onlydeps=False):
3483                 root_config = self.roots[root]
3484                 pkgsettings = self.pkgsettings[root]
3485                 dbs = self._filtered_trees[root]["dbs"]
3486                 vardb = self.roots[root].trees["vartree"].dbapi
3487                 portdb = self.roots[root].trees["porttree"].dbapi
3488                 # List of acceptable packages, ordered by type preference.
3489                 matched_packages = []
3490                 highest_version = None
3491                 if not isinstance(atom, portage.dep.Atom):
3492                         atom = portage.dep.Atom(atom)
3493                 atom_cp = atom.cp
3494                 atom_set = InternalPackageSet(initial_atoms=(atom,))
3495                 existing_node = None
3496                 myeb = None
3497                 usepkgonly = "--usepkgonly" in self.myopts
3498                 empty = "empty" in self.myparams
3499                 selective = "selective" in self.myparams
3500                 reinstall = False
3501                 noreplace = "--noreplace" in self.myopts
3502                 # Behavior of the "selective" parameter depends on
3503                 # whether or not a package matches an argument atom.
3504                 # If an installed package provides an old-style
3505                 # virtual that is no longer provided by an available
3506                 # package, the installed package may match an argument
3507                 # atom even though none of the available packages do.
3508                 # Therefore, "selective" logic does not consider
3509                 # whether or not an installed package matches an
3510                 # argument atom. It only considers whether or not
3511                 # available packages match argument atoms, which is
3512                 # represented by the found_available_arg flag.
3513                 found_available_arg = False
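                # For instance (hypothetical virtual), an installed package
                # that PROVIDEs virtual/foo can match the argument atom
                # "virtual/foo" even when no available package does, which is
                # why only matches from non-installed dbs set this flag below.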
3514                 for find_existing_node in True, False:
3515                         if existing_node:
3516                                 break
3517                         for db, pkg_type, built, installed, db_keys in dbs:
3518                                 if existing_node:
3519                                         break
3520                                 if installed and not find_existing_node:
3521                                         want_reinstall = reinstall or empty or \
3522                                                 (found_available_arg and not selective)
3523                                         if want_reinstall and matched_packages:
3524                                                 continue
3525                                 if hasattr(db, "xmatch"):
3526                                         cpv_list = db.xmatch("match-all", atom)
3527                                 else:
3528                                         cpv_list = db.match(atom)
3529
3530                                 # USE=multislot can make an installed package appear as if
3531                                 # it doesn't satisfy a slot dependency. Rebuilding the ebuild
3532                                 # won't do any good as long as USE=multislot is enabled since
3533                                 # the newly built package still won't have the expected slot.
3534                                 # Therefore, assume that such SLOT dependencies are already
3535                                 # satisfied rather than forcing a rebuild.
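                                # Illustrative (assumed multislot behavior): with
                                # USE=multislot a package such as sys-devel/gcc may
                                # install into a per-version slot, so the installed
                                # copy can fail to match a ":4.3"-style slot dep even
                                # though a rebuild would yield the same slot again.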
3536                                 if installed and not cpv_list and atom.slot:
3537                                         for cpv in db.match(atom.cp):
3538                                                 slot_available = False
3539                                                 for other_db, other_type, other_built, \
3540                                                         other_installed, other_keys in dbs:
3541                                                         try:
3542                                                                 if atom.slot == \
3543                                                                         other_db.aux_get(cpv, ["SLOT"])[0]:
3544                                                                         slot_available = True
3545                                                                         break
3546                                                         except KeyError:
3547                                                                 pass
3548                                                 if not slot_available:
3549                                                         continue
3550                                                 inst_pkg = self._pkg(cpv, "installed",
3551                                                         root_config, installed=installed)
3552                                                 # Remove the slot from the atom and verify that
3553                                                 # the package matches the resulting atom.
3554                                                 atom_without_slot = portage.dep.remove_slot(atom)
3555                                                 if atom.use:
3556                                                         atom_without_slot += str(atom.use)
3557                                                 atom_without_slot = portage.dep.Atom(atom_without_slot)
3558                                                 if portage.match_from_list(
3559                                                         atom_without_slot, [inst_pkg]):
3560                                                         cpv_list = [inst_pkg.cpv]
3561                                                 break
3562
3563                                 if not cpv_list:
3564                                         continue
3565                                 pkg_status = "merge"
3566                                 if installed or onlydeps:
3567                                         pkg_status = "nomerge"
3568                                 # descending order
3569                                 cpv_list.reverse()
3570                                 for cpv in cpv_list:
3571                                         # Make --noreplace take precedence over --newuse.
3572                                         if not installed and noreplace and \
3573                                                 cpv in vardb.match(atom):
3574                                                 # If the installed version is masked, it may
3575                                                 # be necessary to look at lower versions,
3576                                                 # in case there is a visible downgrade.
3577                                                 continue
3578                                         reinstall_for_flags = None
3579                                         cache_key = (pkg_type, root, cpv, pkg_status)
3580                                         calculated_use = True
3581                                         pkg = self._pkg_cache.get(cache_key)
3582                                         if pkg is None:
3583                                                 calculated_use = False
3584                                                 try:
3585                                                         metadata = izip(db_keys, db.aux_get(cpv, db_keys))
3586                                                 except KeyError:
3587                                                         continue
3588                                                 pkg = Package(built=built, cpv=cpv,
3589                                                         installed=installed, metadata=metadata,
3590                                                         onlydeps=onlydeps, root_config=root_config,
3591                                                         type_name=pkg_type)
3592                                                 metadata = pkg.metadata
3593                                                 if not built:
3594                                                         metadata['CHOST'] = pkgsettings.get('CHOST', '')
3595                                                 if not built and ("?" in metadata["LICENSE"] or \
3596                                                         "?" in metadata["PROVIDE"]):
3597                                                         # This is avoided whenever possible because
3598                                                         # it's expensive. It only needs to be done here
3599                                                         # if it has an effect on visibility.
3600                                                         pkgsettings.setcpv(pkg)
3601                                                         metadata["USE"] = pkgsettings["PORTAGE_USE"]
3602                                                         calculated_use = True
3603                                                 self._pkg_cache[pkg] = pkg
3604
3605                                         if not installed or (built and matched_packages):
3606                                                 # Only enforce visibility on installed packages
3607                                                 # if there is at least one other visible package
3608                                                 # available. By filtering installed masked packages
3609                                                 # here, packages that have been masked since they
3610                                                 # were installed can be automatically downgraded
3611                                                 # to an unmasked version.
3612                                                 try:
3613                                                         if not visible(pkgsettings, pkg):
3614                                                                 continue
3615                                                 except portage.exception.InvalidDependString:
3616                                                         if not installed:
3617                                                                 continue
3618
3619                                                 # Enable upgrade or downgrade to a version
3620                                                 # with visible KEYWORDS when the installed
3621                                                 # version is masked by KEYWORDS, but never
3622                                                 # reinstall the same exact version only due
3623                                                 # to a KEYWORDS mask.
3624                                                 if built and matched_packages:
3625
3626                                                         different_version = None
3627                                                         for avail_pkg in matched_packages:
3628                                                                 if not portage.dep.cpvequal(
3629                                                                         pkg.cpv, avail_pkg.cpv):
3630                                                                         different_version = avail_pkg
3631                                                                         break
3632                                                         if different_version is not None:
3633
3634                                                                 if installed and \
3635                                                                         pkgsettings._getMissingKeywords(
3636                                                                         pkg.cpv, pkg.metadata):
3637                                                                         continue
3638
3639                                                                 # If the ebuild no longer exists or its
3640                                                                 # keywords have been dropped, reject built
3641                                                                 # instances (installed or binary).
3642                                                                 # If --usepkgonly is enabled, assume that
3643                                                                 # the ebuild status should be ignored.
3644                                                                 if not usepkgonly:
3645                                                                         try:
3646                                                                                 pkg_eb = self._pkg(
3647                                                                                         pkg.cpv, "ebuild", root_config)
3648                                                                         except portage.exception.PackageNotFound:
3649                                                                                 continue
3650                                                                         else:
3651                                                                                 if not visible(pkgsettings, pkg_eb):
3652                                                                                         continue
3653
3654                                         if not pkg.built and not calculated_use:
3655                                                 # This is avoided whenever possible because
3656                                                 # it's expensive.
3657                                                 pkgsettings.setcpv(pkg)
3658                                                 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
3659
3660                                         if pkg.cp != atom.cp:
3661                                                 # A cpv can be returned from dbapi.match() as an
3662                                                 # old-style virtual match even in cases when the
3663                                                 # package does not actually PROVIDE the virtual.
3664                                                 # Filter out any such false matches here.
3665                                                 if not atom_set.findAtomForPackage(pkg):
3666                                                         continue
3667
3668                                         myarg = None
3669                                         if root == self.target_root:
3670                                                 try:
3671                                                         # Ebuild USE must have been calculated prior
3672                                                         # to this point, in case atoms have USE deps.
3673                                                         myarg = self._iter_atoms_for_pkg(pkg).next()
3674                                                 except StopIteration:
3675                                                         pass
3676                                                 except portage.exception.InvalidDependString:
3677                                                         if not installed:
3678                                                                 # masked by corruption
3679                                                                 continue
3680                                         if not installed and myarg:
3681                                                 found_available_arg = True
3682
3683                                         if atom.use and not pkg.built:
3684                                                 use = pkg.use.enabled
3685                                                 if atom.use.enabled.difference(use):
3686                                                         continue
3687                                                 if atom.use.disabled.intersection(use):
3688                                                         continue
3689                                         if pkg.cp == atom_cp:
3690                                                 if highest_version is None:
3691                                                         highest_version = pkg
3692                                                 elif pkg > highest_version:
3693                                                         highest_version = pkg
3694                                         # At this point, we've found the highest visible
3695                                         # match from the current repo. Any lower versions
3696                                         # from this repo are ignored, so the loop
3697                                         # will always end with a break statement below
3698                                         # this point.
3699                                         if find_existing_node:
3700                                                 e_pkg = self._slot_pkg_map[root].get(pkg.slot_atom)
3701                                                 if not e_pkg:
3702                                                         break
3703                                                 if portage.dep.match_from_list(atom, [e_pkg]):
3704                                                         if highest_version and \
3705                                                                 e_pkg.cp == atom_cp and \
3706                                                                 e_pkg < highest_version and \
3707                                                                 e_pkg.slot_atom != highest_version.slot_atom:
3708                                                                 # There is a higher version available in a
3709                                                                 # different slot, so this existing node is
3710                                                                 # irrelevant.
3711                                                                 pass
3712                                                         else:
3713                                                                 matched_packages.append(e_pkg)
3714                                                                 existing_node = e_pkg
3715                                                 break
3716                                         # Compare built package to current config and
3717                                         # reject the built package if necessary.
3718                                         if built and not installed and \
3719                                                 ("--newuse" in self.myopts or \
3720                                                 "--reinstall" in self.myopts):
3721                                                 iuses = pkg.iuse.all
3722                                                 old_use = pkg.use.enabled
3723                                                 if myeb:
3724                                                         pkgsettings.setcpv(myeb)
3725                                                 else:
3726                                                         pkgsettings.setcpv(pkg)
3727                                                 now_use = pkgsettings["PORTAGE_USE"].split()
3728                                                 forced_flags = set()
3729                                                 forced_flags.update(pkgsettings.useforce)
3730                                                 forced_flags.update(pkgsettings.usemask)
3731                                                 cur_iuse = iuses
3732                                                 if myeb and not usepkgonly:
3733                                                         cur_iuse = myeb.iuse.all
3734                                                 if self._reinstall_for_flags(forced_flags,
3735                                                         old_use, iuses,
3736                                                         now_use, cur_iuse):
3737                                                         break
3738                                         # Compare current config to installed package
3739                                         # and do not reinstall if possible.
3740                                         if not installed and \
3741                                                 ("--newuse" in self.myopts or \
3742                                                 "--reinstall" in self.myopts) and \
3743                                                 cpv in vardb.match(atom):
3744                                                 pkgsettings.setcpv(pkg)
3745                                                 forced_flags = set()
3746                                                 forced_flags.update(pkgsettings.useforce)
3747                                                 forced_flags.update(pkgsettings.usemask)
3748                                                 old_use = vardb.aux_get(cpv, ["USE"])[0].split()
3749                                                 old_iuse = set(filter_iuse_defaults(
3750                                                         vardb.aux_get(cpv, ["IUSE"])[0].split()))
3751                                                 cur_use = pkg.use.enabled
3752                                                 cur_iuse = pkg.iuse.all
3753                                                 reinstall_for_flags = \
3754                                                         self._reinstall_for_flags(
3755                                                         forced_flags, old_use, old_iuse,
3756                                                         cur_use, cur_iuse)
3757                                                 if reinstall_for_flags:
3758                                                         reinstall = True
3759                                         if not built:
3760                                                 myeb = pkg
3761                                         matched_packages.append(pkg)
3762                                         if reinstall_for_flags:
3763                                                 self._reinstall_nodes[pkg] = \
3764                                                         reinstall_for_flags
3765                                         break
3766
3767                 if not matched_packages:
3768                         return None, None
3769
3770                 if "--debug" in self.myopts:
3771                         for pkg in matched_packages:
3772                                 portage.writemsg("%s %s\n" % \
3773                                         ((pkg.type_name + ":").rjust(10), pkg.cpv), noiselevel=-1)
3774
3775                 # Filter out any old-style virtual matches if they are
3776                 # mixed with new-style virtual matches.
3777                 cp = portage.dep_getkey(atom)
3778                 if len(matched_packages) > 1 and \
3779                         "virtual" == portage.catsplit(cp)[0]:
3780                         for pkg in matched_packages:
3781                                 if pkg.cp != cp:
3782                                         continue
3783                                 # Got a new-style virtual, so filter
3784                                 # out any old-style virtuals.
3785                                 matched_packages = [pkg for pkg in matched_packages \
3786                                         if pkg.cp == cp]
3787                                 break
3788
3789                 if len(matched_packages) > 1:
3790                         bestmatch = portage.best(
3791                                 [pkg.cpv for pkg in matched_packages])
3792                         matched_packages = [pkg for pkg in matched_packages \
3793                                 if portage.dep.cpvequal(pkg.cpv, bestmatch)]
3794
3795                 # ordered by type preference ("ebuild" type is the last resort)
3796                 return  matched_packages[-1], existing_node
3797
3798         def _select_pkg_from_graph(self, root, atom, onlydeps=False):
3799                 """
3800                 Select packages that have already been added to the graph or
3801                 those that are installed and have not been scheduled for
3802                 replacement.
3803                 """
3804                 graph_db = self._graph_trees[root]["porttree"].dbapi
3805                 matches = graph_db.match_pkgs(atom)
3806                 if not matches:
3807                         return None, None
3808                 pkg = matches[-1] # highest match
3809                 in_graph = self._slot_pkg_map[root].get(pkg.slot_atom)
3810                 return pkg, in_graph
3811
3812         def _complete_graph(self):
3813                 """
3814                 Add any deep dependencies of required sets (args, system, world) that
3815                 have not been pulled into the graph yet. This ensures that the graph
3816                 is consistent such that initially satisfied deep dependencies are not
3817                 broken in the new graph. Initially unsatisfied dependencies are
3818                 irrelevant since we only want to avoid breaking dependencies that are
3819                 initially satisfied.
3820
3821                 Since this method can consume enough time to disturb users, it is
3822                 currently only enabled by the --complete-graph option.
3823                 """
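                # Illustrative scenario: if the graph schedules a library
                # upgrade and an installed package outside the graph has an
                # initially satisfied dependency on the old version, walking
                # the deep deps of args/system/world here lets that breakage
                # surface (e.g. as a slot collision) instead of going
                # unnoticed.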
3824                 if "--buildpkgonly" in self.myopts or \
3825                         "recurse" not in self.myparams:
3826                         return 1
3827
3828                 if "complete" not in self.myparams:
3829                         # Skip this to avoid consuming enough time to disturb users.
3830                         return 1
3831
3832                 # Put the depgraph into a mode that causes it to only
3833                 # select packages that have already been added to the
3834                 # graph or those that are installed and have not been
3835                 # scheduled for replacement. Also, toggle the "deep"
3836                 # parameter so that all dependencies are traversed and
3837                 # accounted for.
3838                 self._select_atoms = self._select_atoms_from_graph
3839                 self._select_package = self._select_pkg_from_graph
3840                 already_deep = "deep" in self.myparams
3841                 if not already_deep:
3842                         self.myparams.add("deep")
3843
3844                 for root in self.roots:
3845                         required_set_names = self._required_set_names.copy()
3846                         if root == self.target_root and \
3847                                 (already_deep or "empty" in self.myparams):
3848                                 required_set_names.difference_update(self._sets)
3849                         if not required_set_names and not self._ignored_deps:
3850                                 continue
3851                         root_config = self.roots[root]
3852                         setconfig = root_config.setconfig
3853                         args = []
3854                         # Reuse existing SetArg instances when available.
3855                         for arg in self.digraph.root_nodes():
3856                                 if not isinstance(arg, SetArg):
3857                                         continue
3858                                 if arg.root_config != root_config:
3859                                         continue
3860                                 if arg.name in required_set_names:
3861                                         args.append(arg)
3862                                         required_set_names.remove(arg.name)
3863                         # Create new SetArg instances only when necessary.
3864                         for s in required_set_names:
3865                                 expanded_set = InternalPackageSet(
3866                                         initial_atoms=setconfig.getSetAtoms(s))
3867                                 atom = SETPREFIX + s
3868                                 args.append(SetArg(arg=atom, set=expanded_set,
3869                                         root_config=root_config))
3870                         vardb = root_config.trees["vartree"].dbapi
3871                         for arg in args:
3872                                 for atom in arg.set:
3873                                         self._dep_stack.append(
3874                                                 Dependency(atom=atom, root=root, parent=arg))
3875                         if self._ignored_deps:
3876                                 self._dep_stack.extend(self._ignored_deps)
3877                                 self._ignored_deps = []
3878                         if not self._create_graph(allow_unsatisfied=True):
3879                                 return 0
3880                         # Check the unsatisfied deps to see if any initially satisfied deps
3881                         # will become unsatisfied due to an upgrade. Initially unsatisfied
3882                         # deps are irrelevant since we only want to avoid breaking deps
3883                         # that are initially satisfied.
3884                         while self._unsatisfied_deps:
3885                                 dep = self._unsatisfied_deps.pop()
3886                                 matches = vardb.match_pkgs(dep.atom)
3887                                 if not matches:
3888                                         self._initially_unsatisfied_deps.append(dep)
3889                                         continue
3890                                 # A scheduled installation broke a deep dependency.
3891                                 # Add the installed package to the graph so that it
3892                                 # will be appropriately reported as a slot collision
3893                                 # (possibly solvable via backtracking).
3894                                 pkg = matches[-1] # highest match
3895                                 if not self._add_pkg(pkg, dep):
3896                                         return 0
3897                                 if not self._create_graph(allow_unsatisfied=True):
3898                                         return 0
3899                 return 1
3900
3901         def _pkg(self, cpv, type_name, root_config, installed=False):
3902                 """
3903                 Get a package instance from the cache, or create a new
3904                 one if necessary. Raises PackageNotFound (converted from the
3905                 KeyError raised by aux_get) if the package does not exist or
3906                 is corrupt.
3907                 """
3908                 operation = "merge"
3909                 if installed:
3910                         operation = "nomerge"
3911                 pkg = self._pkg_cache.get(
3912                         (type_name, root_config.root, cpv, operation))
3913                 if pkg is None:
3914                         tree_type = self.pkg_tree_map[type_name]
3915                         db = root_config.trees[tree_type].dbapi
3916                         db_keys = list(self._trees_orig[root_config.root][
3917                                 tree_type].dbapi._aux_cache_keys)
3918                         try:
3919                                 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
3920                         except KeyError:
3921                                 raise portage.exception.PackageNotFound(cpv)
3922                         pkg = Package(cpv=cpv, metadata=metadata,
3923                                 root_config=root_config, installed=installed)
3924                         if type_name == "ebuild":
3925                                 settings = self.pkgsettings[root_config.root]
3926                                 settings.setcpv(pkg)
3927                                 pkg.metadata["USE"] = settings["PORTAGE_USE"]
3928                                 pkg.metadata['CHOST'] = settings.get('CHOST', '')
3929                         self._pkg_cache[pkg] = pkg
3930                 return pkg
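                # Editorial note (not in the original source): the cache is keyed by
                # (type_name, root, cpv, operation) tuples, and Package instances
                # hash/compare like their own key tuple, so for example
                #     self._pkg_cache[("installed", "/", "sys-apps/foo-1.0", "nomerge")]
                # (hypothetical cpv) returns the same object as self._pkg_cache[pkg].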
3931
3932         def validate_blockers(self):
3933                 """Remove any blockers from the digraph that do not match any of the
3934                 packages within the graph.  If necessary, create hard deps to ensure
3935                 correct merge order such that mutually blocking packages are never
3936                 installed simultaneously."""
3937
3938                 if "--buildpkgonly" in self.myopts or \
3939                         "--nodeps" in self.myopts:
3940                         return True
3941
3942                 #if "deep" in self.myparams:
3943                 if True:
3944                         # Pull in blockers from all installed packages that haven't already
3945                         # been pulled into the depgraph. This used to be gated on "deep"
3946                         # (see the commented-out condition above) due to the performance
3947                         # penalty of the additional dep_check calls, but now always runs.
3948
3949                         dep_keys = ["DEPEND","RDEPEND","PDEPEND"]
3950                         for myroot in self.trees:
3951                                 vardb = self.trees[myroot]["vartree"].dbapi
3952                                 portdb = self.trees[myroot]["porttree"].dbapi
3953                                 pkgsettings = self.pkgsettings[myroot]
3954                                 final_db = self.mydbapi[myroot]
3955
3956                                 blocker_cache = BlockerCache(myroot, vardb)
3957                                 stale_cache = set(blocker_cache)
3958                                 for pkg in vardb:
3959                                         cpv = pkg.cpv
3960                                         stale_cache.discard(cpv)
3961                                         pkg_in_graph = self.digraph.contains(pkg)
3962
3963                                         # Check for masked installed packages. Only warn about
3964                                         # packages that are in the graph in order to avoid warning
3965                                         # about those that will be automatically uninstalled during
3966                                         # the merge process or by --depclean.
3967                                         if pkg in final_db:
3968                                                 if pkg_in_graph and not visible(pkgsettings, pkg):
3969                                                         self._masked_installed.add(pkg)
3970
3971                                         blocker_atoms = None
3972                                         blockers = None
3973                                         if pkg_in_graph:
3974                                                 blockers = []
3975                                                 try:
3976                                                         blockers.extend(
3977                                                                 self._blocker_parents.child_nodes(pkg))
3978                                                 except KeyError:
3979                                                         pass
3980                                                 try:
3981                                                         blockers.extend(
3982                                                                 self._irrelevant_blockers.child_nodes(pkg))
3983                                                 except KeyError:
3984                                                         pass
3985                                         if blockers is not None:
3986                                                 blockers = set(str(blocker.atom) \
3987                                                         for blocker in blockers)
3988
3989                                         # If this node has any blockers, create a "nomerge"
3990                                         # node for it so that they can be enforced.
3991                                         self.spinner.update()
3992                                         blocker_data = blocker_cache.get(cpv)
3993                                         if blocker_data is not None and \
3994                                                 blocker_data.counter != long(pkg.metadata["COUNTER"]):
3995                                                 blocker_data = None
3996
3997                                         # If blocker data from the graph is available, use
3998                                         # it to validate the cache and update the cache if
3999                                         # it seems invalid.
4000                                         if blocker_data is not None and \
4001                                                 blockers is not None:
4002                                                 if not blockers.symmetric_difference(
4003                                                         blocker_data.atoms):
4004                                                         continue
4005                                                 blocker_data = None
4006
4007                                         if blocker_data is None and \
4008                                                 blockers is not None:
4009                                                 # Re-use the blockers from the graph.
4010                                                 blocker_atoms = sorted(blockers)
4011                                                 counter = long(pkg.metadata["COUNTER"])
4012                                                 blocker_data = \
4013                                                         blocker_cache.BlockerData(counter, blocker_atoms)
4014                                                 blocker_cache[pkg.cpv] = blocker_data
4015                                                 continue
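                                        # Editorial sketch (hypothetical values): a cache entry
                                        # pairs the installed COUNTER with the sorted blocker
                                        # atoms, e.g.
                                        #     blocker_cache.BlockerData(1234, ["!app-misc/foo"])
                                        # and a COUNTER mismatch above invalidates the entry.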
4016
4017                                         if blocker_data:
4018                                                 blocker_atoms = blocker_data.atoms
4019                                         else:
4020                                                 # Use aux_get() to trigger FakeVartree global
4021                                                 # updates on *DEPEND when appropriate.
4022                                                 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
4023                                                 # It is crucial to pass in final_db here in order to
4024                                                 # optimize dep_check calls by eliminating atoms via
4025                                                 # dep_wordreduce and dep_eval calls.
4026                                                 try:
4027                                                         portage.dep._dep_check_strict = False
4028                                                         try:
4029                                                                 success, atoms = portage.dep_check(depstr,
4030                                                                         final_db, pkgsettings, myuse=pkg.use.enabled,
4031                                                                         trees=self._graph_trees, myroot=myroot)
4032                                                         except Exception, e:
4033                                                                 if isinstance(e, SystemExit):
4034                                                                         raise
4035                                                                 # This is helpful, for example, if a ValueError
4036                                                                 # is thrown from cpv_expand due to multiple
4037                                                                 # matches (this can happen if an atom lacks a
4038                                                                 # category).
4039                                                                 show_invalid_depstring_notice(
4040                                                                         pkg, depstr, str(e))
4041                                                                 del e
4042                                                                 raise
4043                                                 finally:
4044                                                         portage.dep._dep_check_strict = True
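                                                # Editorial note: dep_check() returns a (success,
                                                # atoms) pair; after the reduction against final_db
                                                # described above, the result might look like
                                                #     (1, ['!app-misc/foo', 'dev-libs/bar'])
                                                # (hypothetical), and only the "!" blocker atoms
                                                # are extracted below.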
4045                                                 if not success:
4046                                                         replacement_pkg = final_db.match_pkgs(pkg.slot_atom)
4047                                                         if replacement_pkg and \
4048                                                                 replacement_pkg[0].operation == "merge":
4049                                                                 # This package is being replaced anyway, so
4050                                                                 # ignore invalid dependencies so as not to
4051                                                                 # annoy the user too much (otherwise they'd be
4052                                                                 # forced to manually unmerge it first).
4053                                                                 continue
4054                                                         show_invalid_depstring_notice(pkg, depstr, atoms)
4055                                                         return False
4056                                                 blocker_atoms = [myatom for myatom in atoms \
4057                                                         if myatom.startswith("!")]
4058                                                 blocker_atoms.sort()
4059                                                 counter = long(pkg.metadata["COUNTER"])
4060                                                 blocker_cache[cpv] = \
4061                                                         blocker_cache.BlockerData(counter, blocker_atoms)
4062                                         if blocker_atoms:
4063                                                 try:
4064                                                         for atom in blocker_atoms:
4065                                                                 blocker = Blocker(atom=portage.dep.Atom(atom),
4066                                                                         eapi=pkg.metadata["EAPI"], root=myroot)
4067                                                                 self._blocker_parents.add(blocker, pkg)
4068                                                 except portage.exception.InvalidAtom, e:
4069                                                         depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
4070                                                         show_invalid_depstring_notice(
4071                                                                 pkg, depstr, "Invalid Atom: %s" % (e,))
4072                                                         return False
4073                                 for cpv in stale_cache:
4074                                         del blocker_cache[cpv]
4075                                 blocker_cache.flush()
4076                                 del blocker_cache
4077
4078                 # Discard any "uninstall" tasks scheduled by previous calls
4079                 # to this method, since those tasks may not make sense given
4080                 # the current graph state.
4081                 previous_uninstall_tasks = self._blocker_uninstalls.leaf_nodes()
4082                 if previous_uninstall_tasks:
4083                         self._blocker_uninstalls = digraph()
4084                         self.digraph.difference_update(previous_uninstall_tasks)
4085
4086                 for blocker in self._blocker_parents.leaf_nodes():
4087                         self.spinner.update()
4088                         root_config = self.roots[blocker.root]
4089                         virtuals = root_config.settings.getvirtuals()
4090                         myroot = blocker.root
4091                         initial_db = self.trees[myroot]["vartree"].dbapi
4092                         final_db = self.mydbapi[myroot]
4093
4094                         provider_virtual = False
4095                         if blocker.cp in virtuals and \
4096                                 not self._have_new_virt(blocker.root, blocker.cp):
4097                                 provider_virtual = True
4098
4099                         # Use this to check PROVIDE for each matched package
4100                         # when necessary.
4101                         atom_set = InternalPackageSet(
4102                                 initial_atoms=[blocker.atom])
4103
4104                         if provider_virtual:
4105                                 atoms = []
4106                                 for provider_entry in virtuals[blocker.cp]:
4107                                         provider_cp = \
4108                                                 portage.dep_getkey(provider_entry)
4109                                         atoms.append(blocker.atom.replace(
4110                                                 blocker.cp, provider_cp))
4111                         else:
4112                                 atoms = [blocker.atom]
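                        # Editorial sketch (hypothetical atoms): a blocker on a virtual,
                        # say "!virtual/jdk-1.6", is expanded into one atom per provider
                        # entry in the virtuals mapping, e.g.
                        #     ["!dev-java/sun-jdk-1.6", "!dev-java/icedtea-1.6"]
                        # before matching against the installed and final databases.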
4113
4114                         blocked_initial = set()
4115                         for atom in atoms:
4116                                 for pkg in initial_db.match_pkgs(atom):
4117                                         if atom_set.findAtomForPackage(pkg):
4118                                                 blocked_initial.add(pkg)
4119
4120                         blocked_final = set()
4121                         for atom in atoms:
4122                                 for pkg in final_db.match_pkgs(atom):
4123                                         if atom_set.findAtomForPackage(pkg):
4124                                                 blocked_final.add(pkg)
4125
4126                         if not blocked_initial and not blocked_final:
4127                                 parent_pkgs = self._blocker_parents.parent_nodes(blocker)
4128                                 self._blocker_parents.remove(blocker)
4129                                 # Discard any parents that don't have any more blockers.
4130                                 for pkg in parent_pkgs:
4131                                         self._irrelevant_blockers.add(blocker, pkg)
4132                                         if not self._blocker_parents.child_nodes(pkg):
4133                                                 self._blocker_parents.remove(pkg)
4134                                 continue
4135                         for parent in self._blocker_parents.parent_nodes(blocker):
4136                                 unresolved_blocks = False
4137                                 depends_on_order = set()
4138                                 for pkg in blocked_initial:
4139                                         if pkg.slot_atom == parent.slot_atom:
4140                                                 # TODO: Support blocks within slots in cases where it
4141                                                 # might make sense.  For example, a new version might
4142                                                 # require that the old version be uninstalled at build
4143                                                 # time.
4144                                                 continue
4145                                         if parent.installed:
4146                                                 # Two currently installed packages conflict with
4147                                                 # each other. Ignore this case since the damage
4148                                                 # is already done, and displaying it like a normal
4149                                                 # blocker would only confuse users.
4150                                                 continue
4151
4152                                         self._blocked_pkgs.add(pkg, blocker)
4153
4154                                         if parent.operation == "merge":
4155                                                 # Maybe the blocked package can be replaced or simply
4156                                                 # unmerged to resolve this block.
4157                                                 depends_on_order.add((pkg, parent))
4158                                                 continue
4159                                         # None of the above blocker resolution techniques apply,
4160                                         # so apparently this one is unresolvable.
4161                                         unresolved_blocks = True
4162                                 for pkg in blocked_final:
4163                                         if pkg.slot_atom == parent.slot_atom:
4164                                                 # TODO: Support blocks within slots.
4165                                                 continue
4166                                         if parent.operation == "nomerge" and \
4167                                                 pkg.operation == "nomerge":
4168                                                 # This blocker will be handled the next time that a
4169                                                 # merge of either package is triggered.
4170                                                 continue
4171
4172                                         self._blocked_pkgs.add(pkg, blocker)
4173
4174                                         # Maybe the blocking package can be
4175                                         # unmerged to resolve this block.
4176                                         if parent.operation == "merge" and pkg.installed:
4177                                                 depends_on_order.add((pkg, parent))
4178                                                 continue
4179                                         elif parent.operation == "nomerge":
4180                                                 depends_on_order.add((parent, pkg))
4181                                                 continue
4182                                         # None of the above blocker resolution techniques apply,
4183                                         # so apparently this one is unresolvable.
4184                                         unresolved_blocks = True
4185
4186                                 # Make sure we don't unmerge any packages that have been pulled
4187                                 # into the graph.
4188                                 if not unresolved_blocks and depends_on_order:
4189                                         for inst_pkg, inst_task in depends_on_order:
4190                                                 if self.digraph.contains(inst_pkg) and \
4191                                                         self.digraph.parent_nodes(inst_pkg):
4192                                                         unresolved_blocks = True
4193                                                         break
4194
4195                                 if not unresolved_blocks and depends_on_order:
4196                                         for inst_pkg, inst_task in depends_on_order:
4197                                                 uninst_task = Package(built=inst_pkg.built,
4198                                                         cpv=inst_pkg.cpv, installed=inst_pkg.installed,
4199                                                         metadata=inst_pkg.metadata,
4200                                                         operation="uninstall",
4201                                                         root_config=inst_pkg.root_config,
4202                                                         type_name=inst_pkg.type_name)
4203                                                 self._pkg_cache[uninst_task] = uninst_task
4204                                                 # Enforce correct merge order with a hard dep.
4205                                                 self.digraph.addnode(uninst_task, inst_task,
4206                                                         priority=BlockerDepPriority.instance)
4207                                                 # Count references to this blocker so that it can be
4208                                                 # invalidated after nodes referencing it have been
4209                                                 # merged.
4210                                                 self._blocker_uninstalls.addnode(uninst_task, blocker)
4211                                 if not unresolved_blocks and not depends_on_order:
4212                                         self._irrelevant_blockers.add(blocker, parent)
4213                                         self._blocker_parents.remove_edge(blocker, parent)
4214                                         if not self._blocker_parents.parent_nodes(blocker):
4215                                                 self._blocker_parents.remove(blocker)
4216                                         if not self._blocker_parents.child_nodes(parent):
4217                                                 self._blocker_parents.remove(parent)
4218                                 if unresolved_blocks:
4219                                         self._unsolvable_blockers.add(blocker, parent)
4220
4221                 return True
4222
4223         def _accept_blocker_conflicts(self):
4224                 acceptable = False
4225                 for x in ("--buildpkgonly", "--fetchonly",
4226                         "--fetch-all-uri", "--nodeps"):
4227                         if x in self.myopts:
4228                                 acceptable = True
4229                                 break
4230                 return acceptable
4231
4232         def _merge_order_bias(self, mygraph):
4233                 """
4234                 For optimal leaf node selection, promote deep system runtime deps and
4235                 order nodes from highest to lowest overall reference count.
4236                 """
4237
4238                 node_info = {}
4239                 for node in mygraph.order:
4240                         node_info[node] = len(mygraph.parent_nodes(node))
4241                 deep_system_deps = _find_deep_system_runtime_deps(mygraph)
4242
4243                 def cmp_merge_preference(node1, node2):
4244
4245                         if node1.operation == 'uninstall':
4246                                 if node2.operation == 'uninstall':
4247                                         return 0
4248                                 return 1
4249
4250                         if node2.operation == 'uninstall':
4251                                 if node1.operation == 'uninstall':
4252                                         return 0
4253                                 return -1
4254
4255                         node1_sys = node1 in deep_system_deps
4256                         node2_sys = node2 in deep_system_deps
4257                         if node1_sys != node2_sys:
4258                                 if node1_sys:
4259                                         return -1
4260                                 return 1
4261
4262                         return node_info[node2] - node_info[node1]
4263
4264                 mygraph.order.sort(key=cmp_sort_key(cmp_merge_preference))
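                # Editorial sketch (hypothetical nodes): with parent counts
                # {A: 5, B: 2, C: 0}, where B is a deep system runtime dep and C is
                # an uninstall task, the resulting order is [B, A, C]: system deps
                # first, then descending reference count, uninstalls last.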
4265
4266         def altlist(self, reversed=False):
4267
4268                 while self._serialized_tasks_cache is None:
4269                         self._resolve_conflicts()
4270                         try:
4271                                 self._serialized_tasks_cache, self._scheduler_graph = \
4272                                         self._serialize_tasks()
4273                         except self._serialize_tasks_retry:
4274                                 pass
4275
4276                 retlist = self._serialized_tasks_cache[:]
4277                 if reversed:
4278                         retlist.reverse()
4279                 return retlist
4280
4281         def schedulerGraph(self):
4282                 """
4283                 The scheduler graph is identical to the normal one except that
4284                 uninstall edges are reversed in specific cases that require
4285                 conflicting packages to be temporarily installed simultaneously.
4286                 This is intended for use by the Scheduler in its parallelization
4287                 logic. It ensures that temporary simultaneous installation of
4288                 conflicting packages is avoided when appropriate (especially for
4289                 !!atom blockers), but allowed in specific cases that require it.
4290
4291                 Note that this method calls break_refs() which alters the state of
4292                 internal Package instances such that this depgraph instance should
4293                 not be used to perform any more calculations.
4294                 """
4295                 if self._scheduler_graph is None:
4296                         self.altlist()
4297                 self.break_refs(self._scheduler_graph.order)
4298                 return self._scheduler_graph
4299
4300         def break_refs(self, nodes):
4301                 """
4302                 Take a mergelist like that returned from self.altlist() and
4303                 break any references that lead back to the depgraph. This is
4304                 useful if you want to hold references to packages without
4305                 also holding the depgraph on the heap.
4306                 """
4307                 for node in nodes:
4308                         if hasattr(node, "root_config"):
4309                                 # The FakeVartree references the _package_cache which
4310                                 # references the depgraph. So that Package instances don't
4311                                 # hold the depgraph and FakeVartree on the heap, replace
4312                                 # the RootConfig that references the FakeVartree with the
4313                                 # original RootConfig instance which references the actual
4314                                 # vartree.
4315                                 node.root_config = \
4316                                         self._trees_orig[node.root_config.root]["root_config"]
4317
4318         def _resolve_conflicts(self):
4319                 if not self._complete_graph():
4320                         raise self._unknown_internal_error()
4321
4322                 if not self.validate_blockers():
4323                         raise self._unknown_internal_error()
4324
4325                 if self._slot_collision_info:
4326                         self._process_slot_conflicts()
4327
4328         def _serialize_tasks(self):
4329
4330                 if "--debug" in self.myopts:
4331                         writemsg("\ndigraph:\n\n", noiselevel=-1)
4332                         self.digraph.debug_print()
4333                         writemsg("\n", noiselevel=-1)
4334
4335                 scheduler_graph = self.digraph.copy()
4336
4337                 if '--nodeps' in self.myopts:
4338                         # Preserve the package order given on the command line.
4339                         return ([node for node in scheduler_graph \
4340                                 if isinstance(node, Package) \
4341                                 and node.operation == 'merge'], scheduler_graph)
4342
4343                 mygraph = self.digraph.copy()
4344                 # Prune "nomerge" root nodes if nothing depends on them, since
4345                 # otherwise they slow down merge order calculation. Don't remove
4346                 # non-root nodes since they help optimize merge order in some cases
4347                 # such as revdep-rebuild.
4348                 removed_nodes = set()
4349                 while True:
4350                         for node in mygraph.root_nodes():
4351                                 if not isinstance(node, Package) or \
4352                                         node.installed or node.onlydeps:
4353                                         removed_nodes.add(node)
4354                         if removed_nodes:
4355                                 self.spinner.update()
4356                                 mygraph.difference_update(removed_nodes)
4357                         if not removed_nodes:
4358                                 break
4359                         removed_nodes.clear()
4360                 self._merge_order_bias(mygraph)
4361                 def cmp_circular_bias(n1, n2):
4362                         """
4363                         RDEPEND is stronger than PDEPEND and this function
4364                         measures such a strength bias within a circular
4365                         dependency relationship.
4366                         """
4367                         n1_n2_medium = n2 in mygraph.child_nodes(n1,
4368                                 ignore_priority=priority_range.ignore_medium_soft)
4369                         n2_n1_medium = n1 in mygraph.child_nodes(n2,
4370                                 ignore_priority=priority_range.ignore_medium_soft)
4371                         if n1_n2_medium == n2_n1_medium:
4372                                 return 0
4373                         elif n1_n2_medium:
4374                                 return 1
4375                         return -1
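                # Editorial note: given the docstring above, if n1 reaches n2 through
                # an edge that survives ignore_medium_soft (an RDEPEND-strength edge)
                # while n2 only reaches n1 through a softer (PDEPEND-strength) edge,
                # the function returns 1 and the sort places n2 ahead of n1, so the
                # stronger dependency is merged first.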
4376                 myblocker_uninstalls = self._blocker_uninstalls.copy()
4377                 retlist = []
4378                 # Contains uninstall tasks that have been scheduled to
4379                 # occur after overlapping blockers have been installed.
4380                 scheduled_uninstalls = set()
4381                 # Contains any Uninstall tasks that have been ignored
4382                 # in order to avoid the circular deps code path. These
4383                 # correspond to blocker conflicts that could not be
4384                 # resolved.
4385                 ignored_uninstall_tasks = set()
4386                 have_uninstall_task = False
4387                 complete = "complete" in self.myparams
4388                 asap_nodes = []
4389
4390                 def get_nodes(**kwargs):
4391                         """
4392                         Returns leaf nodes excluding Uninstall instances
4393                         since those should be executed as late as possible.
4394                         """
4395                         return [node for node in mygraph.leaf_nodes(**kwargs) \
4396                                 if isinstance(node, Package) and \
4397                                         (node.operation != "uninstall" or \
4398                                         node in scheduled_uninstalls)]
4399
4400                 # sys-apps/portage needs special treatment if ROOT="/"
4401                 running_root = self._running_root.root
4402                 from portage.const import PORTAGE_PACKAGE_ATOM
4403                 runtime_deps = InternalPackageSet(
4404                         initial_atoms=[PORTAGE_PACKAGE_ATOM])
4405                 running_portage = self.trees[running_root]["vartree"].dbapi.match_pkgs(
4406                         PORTAGE_PACKAGE_ATOM)
4407                 replacement_portage = self.mydbapi[running_root].match_pkgs(
4408                         PORTAGE_PACKAGE_ATOM)
4409
4410                 if running_portage:
4411                         running_portage = running_portage[0]
4412                 else:
4413                         running_portage = None
4414
4415                 if replacement_portage:
4416                         replacement_portage = replacement_portage[0]
4417                 else:
4418                         replacement_portage = None
4419
4420                 if replacement_portage == running_portage:
4421                         replacement_portage = None
4422
4423                 if replacement_portage is not None:
4424                         # update from running_portage to replacement_portage asap
4425                         asap_nodes.append(replacement_portage)
4426
4427                 if running_portage is not None:
4428                         try:
4429                                 portage_rdepend = self._select_atoms_highest_available(
4430                                         running_root, running_portage.metadata["RDEPEND"],
4431                                         myuse=running_portage.use.enabled,
4432                                         parent=running_portage, strict=False)
4433                         except portage.exception.InvalidDependString, e:
4434                                 portage.writemsg("!!! Invalid RDEPEND in " + \
4435                                         "'%svar/db/pkg/%s/RDEPEND': %s\n" % \
4436                                         (running_root, running_portage.cpv, e), noiselevel=-1)
4437                                 del e
4438                                 portage_rdepend = []
4439                         runtime_deps.update(atom for atom in portage_rdepend \
4440                                 if not atom.startswith("!"))
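                # Editorial note (hypothetical atom list): at this point runtime_deps
                # holds PORTAGE_PACKAGE_ATOM plus the non-blocker RDEPEND atoms of the
                # running portage instance, e.g. something like
                #     {"sys-apps/portage", "dev-lang/python", "sys-apps/sandbox"}
                # and it is consulted below before any uninstall is scheduled on the
                # running root.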
4441
4442                 def gather_deps(ignore_priority, mergeable_nodes,
4443                         selected_nodes, node):
4444                         """
4445                         Recursively gather a group of nodes that RDEPEND on
4446                         Recursively gather a group of nodes that RDEPEND on
4447                         each other. This ensures that they are merged as a group
4448                         """
4449                         if node in selected_nodes:
4450                                 return True
4451                         if node not in mergeable_nodes:
4452                                 return False
4453                         if node == replacement_portage and \
4454                                 mygraph.child_nodes(node,
4455                                 ignore_priority=priority_range.ignore_medium_soft):
4456                                 # Make sure that portage always has all of its
4457                                 # RDEPENDs installed first.
4458                                 return False
4459                         selected_nodes.add(node)
4460                         for child in mygraph.child_nodes(node,
4461                                 ignore_priority=ignore_priority):
4462                                 if not gather_deps(ignore_priority,
4463                                         mergeable_nodes, selected_nodes, child):
4464                                         return False
4465                         return True
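                # Editorial sketch (hypothetical nodes): for a mergeable cycle
                # A -> B -> C -> A whose edges survive ignore_priority,
                #     gather_deps(ignore_priority, mergeable_nodes, set(), A)
                # fills selected_nodes with {A, B, C} and returns True; it returns
                # False as soon as any member of the group is not mergeable.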
4466
4467                 def ignore_uninst_or_med(priority):
4468                         if priority is BlockerDepPriority.instance:
4469                                 return True
4470                         return priority_range.ignore_medium(priority)
4471
4472                 def ignore_uninst_or_med_soft(priority):
4473                         if priority is BlockerDepPriority.instance:
4474                                 return True
4475                         return priority_range.ignore_medium_soft(priority)
4476
4477                 tree_mode = "--tree" in self.myopts
4478                 # Tracks whether or not the current iteration should prefer asap_nodes
4479                 # if available.  This is set to False when the previous iteration
4480                 # failed to select any nodes.  It is reset whenever nodes are
4481                 # successfully selected.
4482                 prefer_asap = True
4483
4484                 # Controls whether or not the current iteration should drop edges that
4485                 # are "satisfied" by installed packages, in order to solve circular
4486                 # dependencies. The deep runtime dependencies of installed packages are
4487                 # not checked in this case (bug #199856), so it must be avoided
4488                 # whenever possible.
4489                 drop_satisfied = False
4490
4491                 # State of variables for successive iterations that loosen the
4492                 # criteria for node selection.
4493                 #
4494                 # iteration   prefer_asap   drop_satisfied
4495                 # 1           True          False
4496                 # 2           False         False
4497                 # 3           False         True
4498                 #
4499                 # If no nodes are selected on the last iteration, it is due to
4500                 # unresolved blockers or circular dependencies.
4501
4502                 while not mygraph.empty():
4503                         self.spinner.update()
4504                         selected_nodes = None
4505                         ignore_priority = None
4506                         if drop_satisfied or (prefer_asap and asap_nodes):
4507                                 priority_range = DepPrioritySatisfiedRange
4508                         else:
4509                                 priority_range = DepPriorityNormalRange
4510                         if prefer_asap and asap_nodes:
4511                                 # ASAP nodes are merged before their soft deps. Go ahead and
4512                                 # select root nodes here if necessary, since it's typical for
4513                                 # the parent to have been removed from the graph already.
4514                                 asap_nodes = [node for node in asap_nodes \
4515                                         if mygraph.contains(node)]
4516                                 for node in asap_nodes:
4517                                         if not mygraph.child_nodes(node,
4518                                                 ignore_priority=priority_range.ignore_soft):
4519                                                 selected_nodes = [node]
4520                                                 asap_nodes.remove(node)
4521                                                 break
4522                         if not selected_nodes and \
4523                                 not (prefer_asap and asap_nodes):
4524                                 for i in xrange(priority_range.NONE,
4525                                         priority_range.MEDIUM_SOFT + 1):
4526                                         ignore_priority = priority_range.ignore_priority[i]
4527                                         nodes = get_nodes(ignore_priority=ignore_priority)
4528                                         if nodes:
4529                                                 # If there is a mix of uninstall nodes with other
4530                                                 # types, save the uninstall nodes for later since
4531                                                 # sometimes a merge node will render an uninstall
4532                                                 # node unnecessary (due to occupying the same slot),
4533                                                 # and we want to avoid executing a separate uninstall
4534                                                 # task in that case.
4535                                                 if len(nodes) > 1:
4536                                                         good_uninstalls = []
4537                                                         with_some_uninstalls_excluded = []
4538                                                         for node in nodes:
4539                                                                 if node.operation == "uninstall":
4540                                                                         slot_node = self.mydbapi[node.root
4541                                                                                 ].match_pkgs(node.slot_atom)
4542                                                                         if slot_node and \
4543                                                                                 slot_node[0].operation == "merge":
4544                                                                                 continue
4545                                                                         good_uninstalls.append(node)
4546                                                                 with_some_uninstalls_excluded.append(node)
4547                                                         if good_uninstalls:
4548                                                                 nodes = good_uninstalls
4549                                                         elif with_some_uninstalls_excluded:
4550                                                                 nodes = with_some_uninstalls_excluded
4551                                                         else:
4552                                                                 nodes = nodes
4553
4554                                                 if ignore_priority is None and not tree_mode:
4555                                                         # Greedily pop all of these nodes since no
4556                                                         # relationship has been ignored. This optimization
4557                                                         # destroys --tree output, so it's disabled in tree
4558                                                         # mode.
4559                                                         selected_nodes = nodes
4560                                                 else:
4561                                                         # For optimal merge order:
4562                                                         #  * Only pop one node.
4563                                                         #  * Removing a root node (node without a parent)
4564                                                         #    will not produce a leaf node, so avoid it.
4565                                                         #  * It's normal for a selected uninstall to be a
4566                                                         #    root node, so don't check them for parents.
4567                                                         for node in nodes:
4568                                                                 if node.operation == "uninstall" or \
4569                                                                         mygraph.parent_nodes(node):
4570                                                                         selected_nodes = [node]
4571                                                                         break
4572
4573                                                 if selected_nodes:
4574                                                         break
4575
4576                         if not selected_nodes:
4577                                 nodes = get_nodes(ignore_priority=priority_range.ignore_medium)
4578                                 if nodes:
4579                                         mergeable_nodes = set(nodes)
4580                                         if prefer_asap and asap_nodes:
4581                                                 nodes = asap_nodes
4582                                         for i in xrange(priority_range.SOFT,
4583                                                 priority_range.MEDIUM_SOFT + 1):
4584                                                 ignore_priority = priority_range.ignore_priority[i]
4585                                                 for node in nodes:
4586                                                         if not mygraph.parent_nodes(node):
4587                                                                 continue
4588                                                         selected_nodes = set()
4589                                                         if gather_deps(ignore_priority,
4590                                                                 mergeable_nodes, selected_nodes, node):
4591                                                                 break
4592                                                         else:
4593                                                                 selected_nodes = None
4594                                                 if selected_nodes:
4595                                                         break
4596
4597                                         if prefer_asap and asap_nodes and not selected_nodes:
4598                                                 # We failed to find any asap nodes to merge, so ignore
4599                                                 # them for the next iteration.
4600                                                 prefer_asap = False
4601                                                 continue
4602
4603                         if selected_nodes and ignore_priority is not None:
4604                                 # Try to merge ignored medium_soft deps as soon as possible
4605                                 # if they're not satisfied by installed packages.
4606                                 for node in selected_nodes:
4607                                         children = set(mygraph.child_nodes(node))
4608                                         soft = children.difference(
4609                                                 mygraph.child_nodes(node,
4610                                                 ignore_priority=DepPrioritySatisfiedRange.ignore_soft))
4611                                         medium_soft = children.difference(
4612                                                 mygraph.child_nodes(node,
4613                                                         ignore_priority = \
4614                                                         DepPrioritySatisfiedRange.ignore_medium_soft))
4615                                         medium_soft.difference_update(soft)
4616                                         for child in medium_soft:
4617                                                 if child in selected_nodes:
4618                                                         continue
4619                                                 if child in asap_nodes:
4620                                                         continue
4621                                                 asap_nodes.append(child)
4622
4623                         if selected_nodes and len(selected_nodes) > 1:
4624                                 if not isinstance(selected_nodes, list):
4625                                         selected_nodes = list(selected_nodes)
4626                                 selected_nodes.sort(key=cmp_sort_key(cmp_circular_bias))
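                                # Editorial note: cmp_sort_key() (imported from
                                # portage.util) wraps an old-style cmp function in a
                                # sort key, so this call behaves like
                                #     selected_nodes.sort(cmp=cmp_circular_bias)
                                # while remaining compatible with key-only sort APIs.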
4627
4628                         if not selected_nodes and not myblocker_uninstalls.is_empty():
4629                                 # An Uninstall task needs to be executed in order to
4630                                 # avoid a conflict, if possible.
4631
4632                                 if drop_satisfied:
4633                                         priority_range = DepPrioritySatisfiedRange
4634                                 else:
4635                                         priority_range = DepPriorityNormalRange
4636
4637                                 mergeable_nodes = get_nodes(
4638                                         ignore_priority=ignore_uninst_or_med)
4639
4640                                 min_parent_deps = None
4641                                 uninst_task = None
4642                                 for task in myblocker_uninstalls.leaf_nodes():
4643                                         # Do some sanity checks so that system or world packages
4644                                         # don't get uninstalled inappropriately here (only really
4645                                         # necessary when --complete-graph has not been enabled).
4646
4647                                         if task in ignored_uninstall_tasks:
4648                                                 continue
4649
4650                                         if task in scheduled_uninstalls:
4651                                                 # It's been scheduled but it hasn't
4652                                                 # been executed yet due to dependence
4653                                                 # on installation of blocking packages.
4654                                                 continue
4655
4656                                         root_config = self.roots[task.root]
4657                                         inst_pkg = self._pkg_cache[
4658                                                 ("installed", task.root, task.cpv, "nomerge")]
4659
4660                                         if self.digraph.contains(inst_pkg):
4661                                                 continue
4662
4663                                         forbid_overlap = False
4664                                         heuristic_overlap = False
4665                                         for blocker in myblocker_uninstalls.parent_nodes(task):
4666                                                 if blocker.eapi in ("0", "1"):
4667                                                         heuristic_overlap = True
4668                                                 elif blocker.atom.blocker.overlap.forbid:
4669                                                         forbid_overlap = True
4670                                                         break
4671                                         if forbid_overlap and running_root == task.root:
4672                                                 continue
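                                        # Editorial note: "!!" (forbid-overlap) blockers are
                                        # never resolved by uninstalling a package from the
                                        # running root (skipped just above), while EAPI 0/1
                                        # blockers fall back to the heuristic protection of
                                        # sys-apps/portage and its runtime deps below.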
4673
4674                                         if heuristic_overlap and running_root == task.root:
4675                                                 # Never uninstall sys-apps/portage or its essential
4676                                                 # dependencies, except through replacement.
4677                                                 try:
4678                                                         runtime_dep_atoms = \
4679                                                                 list(runtime_deps.iterAtomsForPackage(task))
4680                                                 except portage.exception.InvalidDependString, e:
4681                                                         portage.writemsg("!!! Invalid PROVIDE in " + \
4682                                                                 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
4683                                                                 (task.root, task.cpv, e), noiselevel=-1)
4684                                                         del e
4685                                                         continue
4686
4687                                                 # Don't uninstall a runtime dep if it appears
4688                                                 # to be the only suitable one installed.
4689                                                 skip = False
4690                                                 vardb = root_config.trees["vartree"].dbapi
4691                                                 for atom in runtime_dep_atoms:
4692                                                         other_version = None
4693                                                         for pkg in vardb.match_pkgs(atom):
4694                                                                 if pkg.cpv == task.cpv and \
4695                                                                         pkg.metadata["COUNTER"] == \
4696                                                                         task.metadata["COUNTER"]:
4697                                                                         continue
4698                                                                 other_version = pkg
4699                                                                 break
4700                                                         if other_version is None:
4701                                                                 skip = True
4702                                                                 break
4703                                                 if skip:
4704                                                         continue
4705
4706                                                 # For packages in the system set, don't take
4707                                                 # any chances. If the conflict can't be resolved
4708                                                 # by a normal replacement operation then abort.
4709                                                 skip = False
4710                                                 try:
4711                                                         for atom in root_config.sets[
4712                                                                 "system"].iterAtomsForPackage(task):
4713                                                                 skip = True
4714                                                                 break
4715                                                 except portage.exception.InvalidDependString, e:
4716                                                         portage.writemsg("!!! Invalid PROVIDE in " + \
4717                                                                 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
4718                                                                 (task.root, task.cpv, e), noiselevel=-1)
4719                                                         del e
4720                                                         skip = True
4721                                                 if skip:
4722                                                         continue
4723
4724                                         # Note that the world check isn't always
4725                                         # necessary since self._complete_graph() will
4726                                         # add all packages from the system and world sets to the
4727                                         # graph. This just allows unresolved conflicts to be
4728                                         # detected as early as possible, which makes it possible
4729                                         # to avoid calling self._complete_graph() when it is
4730 					# unnecessary due to blockers triggering an abort.
4731                                         if not complete:
4732 						# For packages in the world set, go ahead and uninstall
4733                                                 # when necessary, as long as the atom will be satisfied
4734                                                 # in the final state.
4735                                                 graph_db = self.mydbapi[task.root]
4736                                                 skip = False
4737                                                 try:
4738                                                         for atom in root_config.sets[
4739                                                                 "world"].iterAtomsForPackage(task):
4740                                                                 satisfied = False
4741                                                                 for pkg in graph_db.match_pkgs(atom):
4742                                                                         if pkg == inst_pkg:
4743                                                                                 continue
4744                                                                         satisfied = True
4745                                                                         break
4746                                                                 if not satisfied:
4747                                                                         skip = True
4748                                                                         self._blocked_world_pkgs[inst_pkg] = atom
4749                                                                         break
4750                                                 except portage.exception.InvalidDependString, e:
4751                                                         portage.writemsg("!!! Invalid PROVIDE in " + \
4752                                                                 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
4753                                                                 (task.root, task.cpv, e), noiselevel=-1)
4754                                                         del e
4755                                                         skip = True
4756                                                 if skip:
4757                                                         continue
4758
4759                                         # Check the deps of parent nodes to ensure that
4760                                         # the chosen task produces a leaf node. Maybe
4761                                         # this can be optimized some more to make the
4762                                         # best possible choice, but the current algorithm
4763                                         # is simple and should be near optimal for most
4764                                         # common cases.
4765                                         mergeable_parent = False
4766                                         parent_deps = set()
4767                                         for parent in mygraph.parent_nodes(task):
4768                                                 parent_deps.update(mygraph.child_nodes(parent,
4769                                                         ignore_priority=priority_range.ignore_medium_soft))
4770                                                 if parent in mergeable_nodes and \
4771                                                         gather_deps(ignore_uninst_or_med_soft,
4772                                                         mergeable_nodes, set(), parent):
4773                                                         mergeable_parent = True
4774
4775                                         if not mergeable_parent:
4776                                                 continue
4777
4778                                         parent_deps.remove(task)
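					# Among the remaining candidates, prefer the uninstall whose
					# parents have the fewest other outstanding dependencies,
					# since those parents are closest to becoming leaf nodes.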
4779                                         if min_parent_deps is None or \
4780                                                 len(parent_deps) < min_parent_deps:
4781                                                 min_parent_deps = len(parent_deps)
4782                                                 uninst_task = task
4783
4784                                 if uninst_task is not None:
4785                                         # The uninstall is performed only after blocking
4786                                         # packages have been merged on top of it. File
4787                                         # collisions between blocking packages are detected
4788                                         # and removed from the list of files to be uninstalled.
4789                                         scheduled_uninstalls.add(uninst_task)
4790                                         parent_nodes = mygraph.parent_nodes(uninst_task)
4791
4792                                         # Reverse the parent -> uninstall edges since we want
4793                                         # to do the uninstall after blocking packages have
4794                                         # been merged on top of it.
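					# Each former parent (a blocking package) is re-added below
					# as a child of uninst_task with BlockerDepPriority, in both
					# mygraph and scheduler_graph, so it is merged before the
					# uninstall runs.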
4795                                         mygraph.remove(uninst_task)
4796                                         for blocked_pkg in parent_nodes:
4797                                                 mygraph.add(blocked_pkg, uninst_task,
4798                                                         priority=BlockerDepPriority.instance)
4799                                                 scheduler_graph.remove_edge(uninst_task, blocked_pkg)
4800                                                 scheduler_graph.add(blocked_pkg, uninst_task,
4801                                                         priority=BlockerDepPriority.instance)
4802
4803                                         # Reset the state variables for leaf node selection and
4804                                         # continue trying to select leaf nodes.
4805                                         prefer_asap = True
4806                                         drop_satisfied = False
4807                                         continue
4808
4809                         if not selected_nodes:
4810                                 # Only select root nodes as a last resort. This case should
4811                                 # only trigger when the graph is nearly empty and the only
4812                                 # remaining nodes are isolated (no parents or children). Since
4813                                 # the nodes must be isolated, ignore_priority is not needed.
4814                                 selected_nodes = get_nodes()
4815
4816                         if not selected_nodes and not drop_satisfied:
4817                                 drop_satisfied = True
4818                                 continue
4819
4820                         if not selected_nodes and not myblocker_uninstalls.is_empty():
4821                                 # If possible, drop an uninstall task here in order to avoid
4822                                 # the circular deps code path. The corresponding blocker will
4823                                 # still be counted as an unresolved conflict.
4824                                 uninst_task = None
4825                                 for node in myblocker_uninstalls.leaf_nodes():
4826                                         try:
4827                                                 mygraph.remove(node)
4828                                         except KeyError:
4829                                                 pass
4830                                         else:
4831                                                 uninst_task = node
4832                                                 ignored_uninstall_tasks.add(node)
4833                                                 break
4834
4835                                 if uninst_task is not None:
4836                                         # Reset the state variables for leaf node selection and
4837                                         # continue trying to select leaf nodes.
4838                                         prefer_asap = True
4839                                         drop_satisfied = False
4840                                         continue
4841
4842                         if not selected_nodes:
4843                                 self._circular_deps_for_display = mygraph
4844                                 raise self._unknown_internal_error()
4845
4846                         # At this point, we've succeeded in selecting one or more nodes, so
4847                         # reset state variables for leaf node selection.
4848                         prefer_asap = True
4849                         drop_satisfied = False
4850
4851                         mygraph.difference_update(selected_nodes)
4852
4853                         for node in selected_nodes:
4854                                 if isinstance(node, Package) and \
4855                                         node.operation == "nomerge":
4856                                         continue
4857
4858                                 # Handle interactions between blockers
4859                                 # and uninstallation tasks.
4860                                 solved_blockers = set()
4861                                 uninst_task = None
4862                                 if isinstance(node, Package) and \
4863                                         "uninstall" == node.operation:
4864                                         have_uninstall_task = True
4865                                         uninst_task = node
4866                                 else:
4867                                         vardb = self.trees[node.root]["vartree"].dbapi
4868                                         previous_cpv = vardb.match(node.slot_atom)
4869                                         if previous_cpv:
4870                                                 # The package will be replaced by this one, so remove
4871                                                 # the corresponding Uninstall task if necessary.
4872                                                 previous_cpv = previous_cpv[0]
4873                                                 uninst_task = \
4874                                                         ("installed", node.root, previous_cpv, "uninstall")
4875                                                 try:
4876                                                         mygraph.remove(uninst_task)
4877                                                 except KeyError:
4878                                                         pass
4879
4880                                 if uninst_task is not None and \
4881                                         uninst_task not in ignored_uninstall_tasks and \
4882                                         myblocker_uninstalls.contains(uninst_task):
4883                                         blocker_nodes = myblocker_uninstalls.parent_nodes(uninst_task)
4884                                         myblocker_uninstalls.remove(uninst_task)
4885                                         # Discard any blockers that this Uninstall solves.
4886                                         for blocker in blocker_nodes:
4887                                                 if not myblocker_uninstalls.child_nodes(blocker):
4888                                                         myblocker_uninstalls.remove(blocker)
4889                                                         solved_blockers.add(blocker)
4890
4891                                 retlist.append(node)
4892
4893                                 if (isinstance(node, Package) and \
4894                                         "uninstall" == node.operation) or \
4895                                         (uninst_task is not None and \
4896                                         uninst_task in scheduled_uninstalls):
4897 					# Include satisfied blockers in the merge list
4898 					# since the user might be interested, and it also
4899 					# serves as an indicator that blocking packages
4900 					# will be temporarily installed simultaneously.
4901                                         for blocker in solved_blockers:
4902                                                 retlist.append(Blocker(atom=blocker.atom,
4903                                                         root=blocker.root, eapi=blocker.eapi,
4904                                                         satisfied=True))
4905
4906                 unsolvable_blockers = set(self._unsolvable_blockers.leaf_nodes())
4907                 for node in myblocker_uninstalls.root_nodes():
4908                         unsolvable_blockers.add(node)
4909
4910                 for blocker in unsolvable_blockers:
4911                         retlist.append(blocker)
4912
4913                 # If any Uninstall tasks need to be executed in order
4914                 # to avoid a conflict, complete the graph with any
4915                 # dependencies that may have been initially
4916                 # neglected (to ensure that unsafe Uninstall tasks
4917                 # are properly identified and blocked from execution).
4918                 if have_uninstall_task and \
4919                         not complete and \
4920                         not unsolvable_blockers:
4921                         self.myparams.add("complete")
4922                         raise self._serialize_tasks_retry("")
4923
4924                 if unsolvable_blockers and \
4925                         not self._accept_blocker_conflicts():
4926                         self._unsatisfied_blockers_for_display = unsolvable_blockers
4927                         self._serialized_tasks_cache = retlist[:]
4928                         self._scheduler_graph = scheduler_graph
4929                         raise self._unknown_internal_error()
4930
4931                 if self._slot_collision_info and \
4932                         not self._accept_blocker_conflicts():
4933                         self._serialized_tasks_cache = retlist[:]
4934                         self._scheduler_graph = scheduler_graph
4935                         raise self._unknown_internal_error()
4936
4937                 return retlist, scheduler_graph
4938
4939         def _show_circular_deps(self, mygraph):
4940                 # No leaf nodes are available, so we have a circular
4941                 # dependency panic situation.  Reduce the noise level to a
4942                 # minimum via repeated elimination of root nodes since they
4943 		# have no parents and thus cannot be part of a cycle.
4944                 while True:
4945                         root_nodes = mygraph.root_nodes(
4946                                 ignore_priority=DepPrioritySatisfiedRange.ignore_medium_soft)
4947                         if not root_nodes:
4948                                 break
4949                         mygraph.difference_update(root_nodes)
4950                 # Display the USE flags that are enabled on nodes that are part
4951                 # of dependency cycles in case that helps the user decide to
4952                 # disable some of them.
4953                 display_order = []
4954                 tempgraph = mygraph.copy()
4955                 while not tempgraph.empty():
4956                         nodes = tempgraph.leaf_nodes()
4957                         if not nodes:
4958                                 node = tempgraph.order[0]
4959                         else:
4960                                 node = nodes[0]
4961                         display_order.append(node)
4962                         tempgraph.remove(node)
4963                 display_order.reverse()
4964                 self.myopts.pop("--quiet", None)
4965                 self.myopts.pop("--verbose", None)
4966                 self.myopts["--tree"] = True
4967                 portage.writemsg("\n\n", noiselevel=-1)
4968                 self.display(display_order)
4969                 prefix = colorize("BAD", " * ")
4970                 portage.writemsg("\n", noiselevel=-1)
4971                 portage.writemsg(prefix + "Error: circular dependencies:\n",
4972                         noiselevel=-1)
4973                 portage.writemsg("\n", noiselevel=-1)
4974                 mygraph.debug_print()
4975                 portage.writemsg("\n", noiselevel=-1)
4976                 portage.writemsg(prefix + "Note that circular dependencies " + \
4977                         "can often be avoided by temporarily\n", noiselevel=-1)
4978                 portage.writemsg(prefix + "disabling USE flags that trigger " + \
4979                         "optional dependencies.\n", noiselevel=-1)
4980
4981         def _show_merge_list(self):
4982                 if self._serialized_tasks_cache is not None and \
4983                         not (self._displayed_list and \
4984                         (self._displayed_list == self._serialized_tasks_cache or \
4985                         self._displayed_list == \
4986                                 list(reversed(self._serialized_tasks_cache)))):
4987                         display_list = self._serialized_tasks_cache[:]
4988                         if "--tree" in self.myopts:
4989                                 display_list.reverse()
4990                         self.display(display_list)
4991
4992         def _show_unsatisfied_blockers(self, blockers):
4993                 self._show_merge_list()
4994                 msg = "Error: The above package list contains " + \
4995                         "packages which cannot be installed " + \
4996                         "at the same time on the same system."
4997                 prefix = colorize("BAD", " * ")
4998                 from textwrap import wrap
4999                 portage.writemsg("\n", noiselevel=-1)
5000                 for line in wrap(msg, 70):
5001                         portage.writemsg(prefix + line + "\n", noiselevel=-1)
5002
5003                 # Display the conflicting packages along with the packages
5004                 # that pulled them in. This is helpful for troubleshooting
5005                 # cases in which blockers don't solve automatically and
5006                 # the reasons are not apparent from the normal merge list
5007                 # display.
5008
5009                 conflict_pkgs = {}
5010                 for blocker in blockers:
5011                         for pkg in chain(self._blocked_pkgs.child_nodes(blocker), \
5012                                 self._blocker_parents.parent_nodes(blocker)):
5013                                 parent_atoms = self._parent_atoms.get(pkg)
5014                                 if not parent_atoms:
5015                                         atom = self._blocked_world_pkgs.get(pkg)
5016                                         if atom is not None:
5017                                                 parent_atoms = set([("@world", atom)])
5018                                 if parent_atoms:
5019                                         conflict_pkgs[pkg] = parent_atoms
5020
5021                 if conflict_pkgs:
5022                         # Reduce noise by pruning packages that are only
5023                         # pulled in by other conflict packages.
5024                         pruned_pkgs = set()
5025                         for pkg, parent_atoms in conflict_pkgs.iteritems():
5026                                 relevant_parent = False
5027                                 for parent, atom in parent_atoms:
5028                                         if parent not in conflict_pkgs:
5029                                                 relevant_parent = True
5030                                                 break
5031                                 if not relevant_parent:
5032                                         pruned_pkgs.add(pkg)
5033                         for pkg in pruned_pkgs:
5034                                 del conflict_pkgs[pkg]
5035
5036                 if conflict_pkgs:
5037                         msg = []
5038                         msg.append("\n")
5039                         indent = "  "
5040                         # Max number of parents shown, to avoid flooding the display.
5041                         max_parents = 3
5042                         for pkg, parent_atoms in conflict_pkgs.iteritems():
5043
5044                                 pruned_list = set()
5045
5046                                 # Prefer packages that are not directly involved in a conflict.
5047                                 for parent_atom in parent_atoms:
5048                                         if len(pruned_list) >= max_parents:
5049                                                 break
5050                                         parent, atom = parent_atom
5051                                         if parent not in conflict_pkgs:
5052                                                 pruned_list.add(parent_atom)
5053
5054                                 for parent_atom in parent_atoms:
5055                                         if len(pruned_list) >= max_parents:
5056                                                 break
5057                                         pruned_list.add(parent_atom)
5058
5059                                 omitted_parents = len(parent_atoms) - len(pruned_list)
5060                                 msg.append(indent + "%s pulled in by\n" % pkg)
5061
5062                                 for parent_atom in pruned_list:
5063                                         parent, atom = parent_atom
5064                                         msg.append(2*indent)
5065                                         if isinstance(parent,
5066                                                 (PackageArg, AtomArg)):
5067                                                 # For PackageArg and AtomArg types, it's
5068                                                 # redundant to display the atom attribute.
5069                                                 msg.append(str(parent))
5070                                         else:
5071                                                 # Display the specific atom from SetArg or
5072                                                 # Package types.
5073                                                 msg.append("%s required by %s" % (atom, parent))
5074                                         msg.append("\n")
5075
5076                                 if omitted_parents:
5077                                         msg.append(2*indent)
5078                                         msg.append("(and %d more)\n" % omitted_parents)
5079
5080                                 msg.append("\n")
5081
5082                         sys.stderr.write("".join(msg))
5083                         sys.stderr.flush()
5084
5085                 if "--quiet" not in self.myopts:
5086                         show_blocker_docs_link()
5087
5088         def display(self, mylist, favorites=[], verbosity=None):
5089
5090                 # This is used to prevent display_problems() from
5091                 # redundantly displaying this exact same merge list
5092                 # again via _show_merge_list().
5093                 self._displayed_list = mylist
5094
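		# Map options to a verbosity level: --quiet -> 1, default -> 2,
		# --verbose -> 3.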
5095                 if verbosity is None:
5096                         verbosity = ("--quiet" in self.myopts and 1 or \
5097                                 "--verbose" in self.myopts and 3 or 2)
5098                 favorites_set = InternalPackageSet(favorites)
5099                 oneshot = "--oneshot" in self.myopts or \
5100                         "--onlydeps" in self.myopts
5101                 columns = "--columns" in self.myopts
5102                 changelogs=[]
5103                 p=[]
5104                 blockers = []
5105
5106                 counters = PackageCounters()
5107
5108                 if verbosity == 1 and "--verbose" not in self.myopts:
5109                         def create_use_string(*args):
5110                                 return ""
5111                 else:
5112                         def create_use_string(name, cur_iuse, iuse_forced, cur_use,
5113                                 old_iuse, old_use,
5114                                 is_new, reinst_flags,
5115                                 all_flags=(verbosity == 3 or "--quiet" in self.myopts),
5116                                 alphabetical=("--alphabetical" in self.myopts)):
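				# Build the USE="..." display string for one USE_EXPAND group.
				# Markers: * means the flag's state changed relative to the
				# installed version, % means the flag was added to or removed
				# from IUSE, and parentheses mark forced/masked flags as well
				# as flags that no longer exist in IUSE.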
5117                                 enabled = []
5118                                 if alphabetical:
5119                                         disabled = enabled
5120                                         removed = enabled
5121                                 else:
5122                                         disabled = []
5123                                         removed = []
5124                                 cur_iuse = set(cur_iuse)
5125                                 enabled_flags = cur_iuse.intersection(cur_use)
5126                                 removed_iuse = set(old_iuse).difference(cur_iuse)
5127                                 any_iuse = cur_iuse.union(old_iuse)
5128                                 any_iuse = list(any_iuse)
5129                                 any_iuse.sort()
5130                                 for flag in any_iuse:
5131                                         flag_str = None
5132                                         isEnabled = False
5133                                         reinst_flag = reinst_flags and flag in reinst_flags
5134                                         if flag in enabled_flags:
5135                                                 isEnabled = True
5136                                                 if is_new or flag in old_use and \
5137                                                         (all_flags or reinst_flag):
5138                                                         flag_str = red(flag)
5139                                                 elif flag not in old_iuse:
5140                                                         flag_str = yellow(flag) + "%*"
5141                                                 elif flag not in old_use:
5142                                                         flag_str = green(flag) + "*"
5143                                         elif flag in removed_iuse:
5144                                                 if all_flags or reinst_flag:
5145                                                         flag_str = yellow("-" + flag) + "%"
5146                                                         if flag in old_use:
5147                                                                 flag_str += "*"
5148                                                         flag_str = "(" + flag_str + ")"
5149                                                         removed.append(flag_str)
5150                                                 continue
5151                                         else:
5152                                                 if is_new or flag in old_iuse and \
5153                                                         flag not in old_use and \
5154                                                         (all_flags or reinst_flag):
5155                                                         flag_str = blue("-" + flag)
5156                                                 elif flag not in old_iuse:
5157                                                         flag_str = yellow("-" + flag)
5158                                                         if flag not in iuse_forced:
5159                                                                 flag_str += "%"
5160                                                 elif flag in old_use:
5161                                                         flag_str = green("-" + flag) + "*"
5162                                         if flag_str:
5163                                                 if flag in iuse_forced:
5164                                                         flag_str = "(" + flag_str + ")"
5165                                                 if isEnabled:
5166                                                         enabled.append(flag_str)
5167                                                 else:
5168                                                         disabled.append(flag_str)
5169
5170                                 if alphabetical:
5171                                         ret = " ".join(enabled)
5172                                 else:
5173                                         ret = " ".join(enabled + disabled + removed)
5174                                 if ret:
5175                                         ret = '%s="%s" ' % (name, ret)
5176                                 return ret
5177
5178                 repo_display = RepoDisplay(self.roots)
5179
5180                 tree_nodes = []
5181                 display_list = []
5182                 mygraph = self.digraph.copy()
5183
5184                 # If there are any Uninstall instances, add the corresponding
5185                 # blockers to the digraph (useful for --tree display).
5186
5187                 executed_uninstalls = set(node for node in mylist \
5188                         if isinstance(node, Package) and node.operation == "unmerge")
5189
5190                 for uninstall in self._blocker_uninstalls.leaf_nodes():
5191                         uninstall_parents = \
5192                                 self._blocker_uninstalls.parent_nodes(uninstall)
5193                         if not uninstall_parents:
5194                                 continue
5195
5196                         # Remove the corresponding "nomerge" node and substitute
5197                         # the Uninstall node.
5198                         inst_pkg = self._pkg_cache[
5199                                 ("installed", uninstall.root, uninstall.cpv, "nomerge")]
5200                         try:
5201                                 mygraph.remove(inst_pkg)
5202                         except KeyError:
5203                                 pass
5204
5205                         try:
5206                                 inst_pkg_blockers = self._blocker_parents.child_nodes(inst_pkg)
5207                         except KeyError:
5208                                 inst_pkg_blockers = []
5209
5210                         # Break the Package -> Uninstall edges.
5211                         mygraph.remove(uninstall)
5212
5213                         # Resolution of a package's blockers
5214 			# depends on its own uninstallation.
5215                         for blocker in inst_pkg_blockers:
5216                                 mygraph.add(uninstall, blocker)
5217
5218                         # Expand Package -> Uninstall edges into
5219                         # Package -> Blocker -> Uninstall edges.
5220                         for blocker in uninstall_parents:
5221                                 mygraph.add(uninstall, blocker)
5222                                 for parent in self._blocker_parents.parent_nodes(blocker):
5223                                         if parent != inst_pkg:
5224                                                 mygraph.add(blocker, parent)
5225
5226                         # If the uninstall task did not need to be executed because
5227                         # of an upgrade, display Blocker -> Upgrade edges since the
5228                         # corresponding Blocker -> Uninstall edges will not be shown.
5229                         upgrade_node = \
5230                                 self._slot_pkg_map[uninstall.root].get(uninstall.slot_atom)
5231                         if upgrade_node is not None and \
5232                                 uninstall not in executed_uninstalls:
5233                                 for blocker in uninstall_parents:
5234                                         mygraph.add(upgrade_node, blocker)
5235
5236                 unsatisfied_blockers = []
5237                 i = 0
5238                 depth = 0
5239                 shown_edges = set()
5240                 for x in mylist:
5241                         if isinstance(x, Blocker) and not x.satisfied:
5242                                 unsatisfied_blockers.append(x)
5243                                 continue
5244                         graph_key = x
5245                         if "--tree" in self.myopts:
5246                                 depth = len(tree_nodes)
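				# Walk back up the current branch until we find a node that
				# has this package as a direct child; its position determines
				# the depth at which this entry is displayed.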
5247                                 while depth and graph_key not in \
5248                                         mygraph.child_nodes(tree_nodes[depth-1]):
5249                                                 depth -= 1
5250                                 if depth:
5251                                         tree_nodes = tree_nodes[:depth]
5252                                         tree_nodes.append(graph_key)
5253                                         display_list.append((x, depth, True))
5254                                         shown_edges.add((graph_key, tree_nodes[depth-1]))
5255                                 else:
5256 					traversed_nodes = set() # prevent endless cycles
5257                                         traversed_nodes.add(graph_key)
5258                                         def add_parents(current_node, ordered):
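						# Recursively emit parent nodes before the node itself
						# so that the --tree output shows the chain of packages
						# that pulled this node into the graph.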
5259                                                 parent_nodes = None
5260                                                 # Do not traverse to parents if this node is an
5261 						# argument or a direct member of a set that has
5262                                                 # been specified as an argument (system or world).
5263                                                 if current_node not in self._set_nodes:
5264                                                         parent_nodes = mygraph.parent_nodes(current_node)
5265                                                 if parent_nodes:
5266                                                         child_nodes = set(mygraph.child_nodes(current_node))
5267                                                         selected_parent = None
5268                                                         # First, try to avoid a direct cycle.
5269                                                         for node in parent_nodes:
5270                                                                 if not isinstance(node, (Blocker, Package)):
5271                                                                         continue
5272                                                                 if node not in traversed_nodes and \
5273                                                                         node not in child_nodes:
5274                                                                         edge = (current_node, node)
5275                                                                         if edge in shown_edges:
5276                                                                                 continue
5277                                                                         selected_parent = node
5278                                                                         break
5279                                                         if not selected_parent:
5280                                                                 # A direct cycle is unavoidable.
5281                                                                 for node in parent_nodes:
5282                                                                         if not isinstance(node, (Blocker, Package)):
5283                                                                                 continue
5284                                                                         if node not in traversed_nodes:
5285                                                                                 edge = (current_node, node)
5286                                                                                 if edge in shown_edges:
5287                                                                                         continue
5288                                                                                 selected_parent = node
5289                                                                                 break
5290                                                         if selected_parent:
5291                                                                 shown_edges.add((current_node, selected_parent))
5292                                                                 traversed_nodes.add(selected_parent)
5293                                                                 add_parents(selected_parent, False)
5294                                                 display_list.append((current_node,
5295                                                         len(tree_nodes), ordered))
5296                                                 tree_nodes.append(current_node)
5297                                         tree_nodes = []
5298                                         add_parents(graph_key, True)
5299                         else:
5300                                 display_list.append((x, depth, True))
5301                 mylist = display_list
5302                 for x in unsatisfied_blockers:
5303                         mylist.append((x, 0, True))
5304
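		# Walk the list in reverse and prune noise left over from filling
		# in the tree: consecutive duplicate entries at the top level, and
		# "nomerge" parents that do not lead down to a merge entry.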
5305                 last_merge_depth = 0
5306                 for i in xrange(len(mylist)-1,-1,-1):
5307                         graph_key, depth, ordered = mylist[i]
5308                         if not ordered and depth == 0 and i > 0 \
5309                                 and graph_key == mylist[i-1][0] and \
5310                                 mylist[i-1][1] == 0:
5311 			# An unordered entry that duplicates the preceding one was
5312 			# added while the tree was being filled in, so drop it.
5313                                 del mylist[i]
5314                                 continue
5315                         if ordered and graph_key[-1] != "nomerge":
5316                                 last_merge_depth = depth
5317                                 continue
5318                         if depth >= last_merge_depth or \
5319                                 i < len(mylist) - 1 and \
5320                                 depth >= mylist[i+1][1]:
5321                                         del mylist[i]
5322
5323                 from portage import flatten
5324                 from portage.dep import use_reduce, paren_reduce
5325 		# files to fetch list - avoids counting the same file twice
5326                 # in size display (verbose mode)
5327                 myfetchlist=[]
5328
5329                 # Use this set to detect when all the "repoadd" strings are "[0]"
5330                 # and disable the entire repo display in this case.
5331                 repoadd_set = set()
5332
5333                 for mylist_index in xrange(len(mylist)):
5334                         x, depth, ordered = mylist[mylist_index]
5335                         pkg_type = x[0]
5336                         myroot = x[1]
5337                         pkg_key = x[2]
5338                         portdb = self.trees[myroot]["porttree"].dbapi
5339                         bindb  = self.trees[myroot]["bintree"].dbapi
5340                         vardb = self.trees[myroot]["vartree"].dbapi
5341                         vartree = self.trees[myroot]["vartree"]
5342                         pkgsettings = self.pkgsettings[myroot]
5343
5344                         fetch=" "
5345                         indent = " " * depth
5346
5347                         if isinstance(x, Blocker):
5348                                 if x.satisfied:
5349                                         blocker_style = "PKG_BLOCKER_SATISFIED"
5350                                         addl = "%s  %s  " % (colorize(blocker_style, "b"), fetch)
5351                                 else:
5352                                         blocker_style = "PKG_BLOCKER"
5353                                         addl = "%s  %s  " % (colorize(blocker_style, "B"), fetch)
5354                                 if ordered:
5355                                         counters.blocks += 1
5356                                         if x.satisfied:
5357                                                 counters.blocks_satisfied += 1
5358                                 resolved = portage.key_expand(
5359                                         str(x.atom).lstrip("!"), mydb=vardb, settings=pkgsettings)
5360                                 if "--columns" in self.myopts and "--quiet" in self.myopts:
5361                                         addl += " " + colorize(blocker_style, resolved)
5362                                 else:
5363                                         addl = "[%s %s] %s%s" % \
5364                                                 (colorize(blocker_style, "blocks"),
5365                                                 addl, indent, colorize(blocker_style, resolved))
5366                                 block_parents = self._blocker_parents.parent_nodes(x)
5367                                 block_parents = set([pnode[2] for pnode in block_parents])
5368                                 block_parents = ", ".join(block_parents)
5369                                 if resolved!=x[2]:
5370                                         addl += colorize(blocker_style,
5371                                                 " (\"%s\" is blocking %s)") % \
5372                                                 (str(x.atom).lstrip("!"), block_parents)
5373                                 else:
5374                                         addl += colorize(blocker_style,
5375                                                 " (is blocking %s)") % block_parents
5376 				if x.satisfied:
5377                                         if columns:
5378                                                 continue
5379                                         p.append(addl)
5380                                 else:
5381                                         blockers.append(addl)
5382                         else:
5383                                 pkg_status = x[3]
5384                                 pkg_merge = ordered and pkg_status == "merge"
5385                                 if not pkg_merge and pkg_status == "merge":
5386                                         pkg_status = "nomerge"
5387                                 built = pkg_type != "ebuild"
5388                                 installed = pkg_type == "installed"
5389                                 pkg = x
5390                                 metadata = pkg.metadata
5391                                 ebuild_path = None
5392                                 repo_name = metadata["repository"]
5393                                 if pkg_type == "ebuild":
5394                                         ebuild_path = portdb.findname(pkg_key)
5395                                         if not ebuild_path: # shouldn't happen
5396                                                 raise portage.exception.PackageNotFound(pkg_key)
5397                                         repo_path_real = os.path.dirname(os.path.dirname(
5398                                                 os.path.dirname(ebuild_path)))
5399                                 else:
5400                                         repo_path_real = portdb.getRepositoryPath(repo_name)
5401                                 pkg_use = list(pkg.use.enabled)
5402                                 try:
5403                                         restrict = flatten(use_reduce(paren_reduce(
5404                                                 pkg.metadata["RESTRICT"]), uselist=pkg_use))
5405                                 except portage.exception.InvalidDependString, e:
5406                                         if not pkg.installed:
5407                                                 show_invalid_depstring_notice(x,
5408                                                         pkg.metadata["RESTRICT"], str(e))
5409                                                 del e
5410                                                 return 1
5411                                         restrict = []
5412                                 if "ebuild" == pkg_type and x[3] != "nomerge" and \
5413                                         "fetch" in restrict:
5414                                         fetch = red("F")
5415                                         if ordered:
5416                                                 counters.restrict_fetch += 1
5417                                         if portdb.fetch_check(pkg_key, pkg_use):
5418                                                 fetch = green("f")
5419                                                 if ordered:
5420                                                         counters.restrict_fetch_satisfied += 1
5421
5422 				# We need to use "--emptytree" testing here rather than "empty" param testing because "empty"
5423 				# param is used for -u, where you still *do* want to see when something is being upgraded.
5424                                 myoldbest = []
5425                                 myinslotlist = None
5426                                 installed_versions = vardb.match(portage.cpv_getkey(pkg_key))
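				# Determine the status letters shown in the merge list: R is a
				# reinstall of an already installed version, U is an upgrade
				# and UD a downgrade within an existing slot, NS is a new
				# slot, and N is a package that is not installed at all.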
5427                                 if vardb.cpv_exists(pkg_key):
5428                                         addl="  "+yellow("R")+fetch+"  "
5429                                         if ordered:
5430                                                 if pkg_merge:
5431                                                         counters.reinst += 1
5432                                                 elif pkg_status == "uninstall":
5433                                                         counters.uninst += 1
5434                                 # filter out old-style virtual matches
5435                                 elif installed_versions and \
5436                                         portage.cpv_getkey(installed_versions[0]) == \
5437                                         portage.cpv_getkey(pkg_key):
5438                                         myinslotlist = vardb.match(pkg.slot_atom)
5439                                         # If this is the first install of a new-style virtual, we
5440                                         # need to filter out old-style virtual matches.
5441                                         if myinslotlist and \
5442                                                 portage.cpv_getkey(myinslotlist[0]) != \
5443                                                 portage.cpv_getkey(pkg_key):
5444                                                 myinslotlist = None
5445                                         if myinslotlist:
5446                                                 myoldbest = myinslotlist[:]
5447                                                 addl = "   " + fetch
5448                                                 if not portage.dep.cpvequal(pkg_key,
5449                                                         portage.best([pkg_key] + myoldbest)):
5450                                                         # Downgrade in slot
5451                                                         addl += turquoise("U")+blue("D")
5452                                                         if ordered:
5453                                                                 counters.downgrades += 1
5454                                                 else:
5455                                                         # Update in slot
5456                                                         addl += turquoise("U") + " "
5457                                                         if ordered:
5458                                                                 counters.upgrades += 1
5459                                         else:
5460                                                 # New slot, mark it new.
5461                                                 addl = " " + green("NS") + fetch + "  "
5462                                                 myoldbest = vardb.match(portage.cpv_getkey(pkg_key))
5463                                                 if ordered:
5464                                                         counters.newslot += 1
5465
5466                                         if "--changelog" in self.myopts:
5467                                                 inst_matches = vardb.match(pkg.slot_atom)
5468                                                 if inst_matches:
5469                                                         changelogs.extend(self.calc_changelog(
5470                                                                 portdb.findname(pkg_key),
5471                                                                 inst_matches[0], pkg_key))
5472                                 else:
5473                                         addl = " " + green("N") + " " + fetch + "  "
5474                                         if ordered:
5475                                                 counters.new += 1
5476
5477                                 verboseadd = ""
5478                                 repoadd = None
5479
5480                                 if True:
5481                                         # USE flag display
5482                                         forced_flags = set()
5483                                         pkgsettings.setcpv(pkg) # for package.use.{mask,force}
5484                                         forced_flags.update(pkgsettings.useforce)
5485                                         forced_flags.update(pkgsettings.usemask)
5486
5487                                         cur_use = [flag for flag in pkg.use.enabled \
5488                                                 if flag in pkg.iuse.all]
5489                                         cur_iuse = sorted(pkg.iuse.all)
5490
5491                                         if myoldbest and myinslotlist:
5492                                                 previous_cpv = myoldbest[0]
5493                                         else:
5494                                                 previous_cpv = pkg.cpv
5495                                         if vardb.cpv_exists(previous_cpv):
5496                                                 old_iuse, old_use = vardb.aux_get(
5497                                                                 previous_cpv, ["IUSE", "USE"])
5498                                                 old_iuse = list(set(
5499                                                         filter_iuse_defaults(old_iuse.split())))
5500                                                 old_iuse.sort()
5501                                                 old_use = old_use.split()
5502                                                 is_new = False
5503                                         else:
5504                                                 old_iuse = []
5505                                                 old_use = []
5506                                                 is_new = True
5507
5508                                         old_use = [flag for flag in old_use if flag in old_iuse]
5509
5510                                         use_expand = pkgsettings["USE_EXPAND"].lower().split()
5511                                         use_expand.sort()
5512                                         use_expand.reverse()
5513                                         use_expand_hidden = \
5514                                                 pkgsettings["USE_EXPAND_HIDDEN"].lower().split()
5515
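					# Group a flat list of USE flags by their USE_EXPAND
					# prefix: a flag such as "linguas_de" is filed under the
					# "linguas" group as "de", anything left over stays under
					# "USE", and hidden groups are dropped unless removeHidden
					# is False. With forcedFlags=True a parallel mapping of
					# forced flags is returned as well.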
5516                                         def map_to_use_expand(myvals, forcedFlags=False,
5517                                                 removeHidden=True):
5518                                                 ret = {}
5519                                                 forced = {}
5520                                                 for exp in use_expand:
5521                                                         ret[exp] = []
5522                                                         forced[exp] = set()
5523                                                         for val in myvals[:]:
5524                                                                 if val.startswith(exp.lower()+"_"):
5525                                                                         if val in forced_flags:
5526                                                                                 forced[exp].add(val[len(exp)+1:])
5527                                                                         ret[exp].append(val[len(exp)+1:])
5528                                                                         myvals.remove(val)
5529                                                 ret["USE"] = myvals
5530                                                 forced["USE"] = [val for val in myvals \
5531                                                         if val in forced_flags]
5532                                                 if removeHidden:
5533                                                         for exp in use_expand_hidden:
5534                                                                 ret.pop(exp, None)
5535                                                 if forcedFlags:
5536                                                         return ret, forced
5537                                                 return ret
5538
5539                                         # Prevent USE_EXPAND_HIDDEN flags from being hidden if they
5540                                         # are the only thing that triggered reinstallation.
5541                                         reinst_flags_map = {}
5542                                         reinstall_for_flags = self._reinstall_nodes.get(pkg)
5543                                         reinst_expand_map = None
5544                                         if reinstall_for_flags:
5545                                                 reinst_flags_map = map_to_use_expand(
5546                                                         list(reinstall_for_flags), removeHidden=False)
5547                                                 for k in list(reinst_flags_map):
5548                                                         if not reinst_flags_map[k]:
5549                                                                 del reinst_flags_map[k]
5550                                                 if not reinst_flags_map.get("USE"):
5551                                                         reinst_expand_map = reinst_flags_map.copy()
5552                                                         reinst_expand_map.pop("USE", None)
5553                                         if reinst_expand_map and \
5554                                                 not set(reinst_expand_map).difference(
5555                                                 use_expand_hidden):
5556                                                 use_expand_hidden = \
5557                                                         set(use_expand_hidden).difference(
5558                                                         reinst_expand_map)
5559
5560                                         cur_iuse_map, iuse_forced = \
5561                                                 map_to_use_expand(cur_iuse, forcedFlags=True)
5562                                         cur_use_map = map_to_use_expand(cur_use)
5563                                         old_iuse_map = map_to_use_expand(old_iuse)
5564                                         old_use_map = map_to_use_expand(old_use)
5565
5566                                         use_expand.sort()
5567                                         use_expand.insert(0, "USE")
5568
5569                                         for key in use_expand:
5570                                                 if key in use_expand_hidden:
5571                                                         continue
5572                                                 verboseadd += create_use_string(key.upper(),
5573                                                         cur_iuse_map[key], iuse_forced[key],
5574                                                         cur_use_map[key], old_iuse_map[key],
5575                                                         old_use_map[key], is_new,
5576                                                         reinst_flags_map.get(key))
5577
5578                                 if verbosity == 3:
5579                                         # size verbose
5580                                         mysize=0
5581                                         if pkg_type == "ebuild" and pkg_merge:
5582                                                 try:
5583                                                         myfilesdict = portdb.getfetchsizes(pkg_key,
5584                                                                 useflags=pkg_use, debug=self.edebug)
5585                                                 except portage.exception.InvalidDependString, e:
5586                                                         src_uri = portdb.aux_get(pkg_key, ["SRC_URI"])[0]
5587                                                         show_invalid_depstring_notice(x, src_uri, str(e))
5588                                                         del e
5589                                                         return 1
5590                                                 if myfilesdict is None:
5591                                                         myfilesdict="[empty/missing/bad digest]"
5592                                                 else:
5593                                                         for myfetchfile in myfilesdict:
5594                                                                 if myfetchfile not in myfetchlist:
5595                                                                         mysize+=myfilesdict[myfetchfile]
5596                                                                         myfetchlist.append(myfetchfile)
5597                                                         if ordered:
5598                                                                 counters.totalsize += mysize
5599                                                 verboseadd += format_size(mysize)
5600
5601                                         # overlay verbose
5602                                         # check for a previously installed version in the same slot
5603                                         has_previous = False
5604                                         repo_name_prev = None
5605                                         slot_atom = "%s:%s" % (portage.dep_getkey(pkg_key),
5606                                                 metadata["SLOT"])
5607                                         slot_matches = vardb.match(slot_atom)
5608                                         if slot_matches:
5609                                                 has_previous = True
5610                                                 repo_name_prev = vardb.aux_get(slot_matches[0],
5611                                                         ["repository"])[0]
5612
5613                                         # now use the data to generate output
5614                                         if pkg.installed or not has_previous:
5615                                                 repoadd = repo_display.repoStr(repo_path_real)
5616                                         else:
5617                                                 repo_path_prev = None
5618                                                 if repo_name_prev:
5619                                                         repo_path_prev = portdb.getRepositoryPath(
5620                                                                 repo_name_prev)
5621                                                 if repo_path_prev == repo_path_real:
5622                                                         repoadd = repo_display.repoStr(repo_path_real)
5623                                                 else:
5624                                                         repoadd = "%s=>%s" % (
5625                                                                 repo_display.repoStr(repo_path_prev),
5626                                                                 repo_display.repoStr(repo_path_real))
5627                                         if repoadd:
5628                                                 repoadd_set.add(repoadd)
5629
5630                                 xs = [portage.cpv_getkey(pkg_key)] + \
5631                                         list(portage.catpkgsplit(pkg_key)[2:])
5632                                 if xs[2] == "r0":
5633                                         xs[2] = ""
5634                                 else:
5635                                         xs[2] = "-" + xs[2]
5636
5637                                 mywidth = 130
5638                                 if "COLUMNWIDTH" in self.settings:
5639                                         try:
5640                                                 mywidth = int(self.settings["COLUMNWIDTH"])
5641                                         except ValueError, e:
5642                                                 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
5643                                                 portage.writemsg(
5644                                                         "!!! Unable to parse COLUMNWIDTH='%s'\n" % \
5645                                                         self.settings["COLUMNWIDTH"], noiselevel=-1)
5646                                                 del e
5647                                 oldlp = mywidth - 30
5648                                 newlp = oldlp - 30
5649
5650                                 # Convert myoldbest from a list to a string.
5651                                 if not myoldbest:
5652                                         myoldbest = ""
5653                                 else:
5654                                         for pos, key in enumerate(myoldbest):
5655                                                 key = portage.catpkgsplit(key)[2] + \
5656                                                         "-" + portage.catpkgsplit(key)[3]
5657                                                 if key[-3:] == "-r0":
5658                                                         key = key[:-3]
5659                                                 myoldbest[pos] = key
5660                                         myoldbest = blue("["+", ".join(myoldbest)+"]")
5661
5662                                 pkg_cp = xs[0]
5663                                 root_config = self.roots[myroot]
5664                                 system_set = root_config.sets["system"]
5665                                 world_set  = root_config.sets["world"]
5666
5667                                 pkg_system = False
5668                                 pkg_world = False
5669                                 try:
5670                                         pkg_system = system_set.findAtomForPackage(pkg)
5671                                         pkg_world  = world_set.findAtomForPackage(pkg)
5672                                         if not (oneshot or pkg_world) and \
5673                                                 myroot == self.target_root and \
5674                                                 favorites_set.findAtomForPackage(pkg):
5675                                                 # Maybe it will be added to world now.
5676                                                 if create_world_atom(pkg, favorites_set, root_config):
5677                                                         pkg_world = True
5678                                 except portage.exception.InvalidDependString:
5679                                         # This is reported elsewhere if relevant.
5680                                         pass
5681
5682                                 def pkgprint(pkg_str):
5683                                         if pkg_merge:
5684                                                 if pkg_system:
5685                                                         return colorize("PKG_MERGE_SYSTEM", pkg_str)
5686                                                 elif pkg_world:
5687                                                         return colorize("PKG_MERGE_WORLD", pkg_str)
5688                                                 else:
5689                                                         return colorize("PKG_MERGE", pkg_str)
5690                                         elif pkg_status == "uninstall":
5691                                                 return colorize("PKG_UNINSTALL", pkg_str)
5692                                         else:
5693                                                 if pkg_system:
5694                                                         return colorize("PKG_NOMERGE_SYSTEM", pkg_str)
5695                                                 elif pkg_world:
5696                                                         return colorize("PKG_NOMERGE_WORLD", pkg_str)
5697                                                 else:
5698                                                         return colorize("PKG_NOMERGE", pkg_str)
5699
5700                                 try:
5701                                         properties = flatten(use_reduce(paren_reduce(
5702                                                 pkg.metadata["PROPERTIES"]), uselist=pkg.use.enabled))
5703                                 except portage.exception.InvalidDependString, e:
5704                                         if not pkg.installed:
5705                                                 show_invalid_depstring_notice(pkg,
5706                                                         pkg.metadata["PROPERTIES"], str(e))
5707                                                 del e
5708                                                 return 1
5709                                         properties = []
5710                                 interactive = "interactive" in properties
5711                                 if interactive and pkg.operation == "merge":
5712                                         addl = colorize("WARN", "I") + addl[1:]
5713                                         if ordered:
5714                                                 counters.interactive += 1
5715
5716                                 if x[1]!="/":
5717                                         if myoldbest:
5718                                                 myoldbest +=" "
5719                                         if "--columns" in self.myopts:
5720                                                 if "--quiet" in self.myopts:
5721                                                         myprint=addl+" "+indent+pkgprint(pkg_cp)
5722                                                         myprint=myprint+darkblue(" "+xs[1]+xs[2])+" "
5723                                                         myprint=myprint+myoldbest
5724                                                         myprint=myprint+darkgreen("to "+x[1])
5725                                                         verboseadd = None
5726                                                 else:
5727                                                         if not pkg_merge:
5728                                                                 myprint = "[%s] %s%s" % \
5729                                                                         (pkgprint(pkg_status.ljust(13)),
5730                                                                         indent, pkgprint(pkg.cp))
5731                                                         else:
5732                                                                 myprint = "[%s %s] %s%s" % \
5733                                                                         (pkgprint(pkg.type_name), addl,
5734                                                                         indent, pkgprint(pkg.cp))
5735                                                         if (newlp-nc_len(myprint)) > 0:
5736                                                                 myprint=myprint+(" "*(newlp-nc_len(myprint)))
5737                                                         myprint=myprint+"["+darkblue(xs[1]+xs[2])+"] "
5738                                                         if (oldlp-nc_len(myprint)) > 0:
5739                                                                 myprint=myprint+" "*(oldlp-nc_len(myprint))
5740                                                         myprint=myprint+myoldbest
5741                                                         myprint += darkgreen("to " + pkg.root)
5742                                         else:
5743                                                 if not pkg_merge:
5744                                                         myprint = "[%s] " % pkgprint(pkg_status.ljust(13))
5745                                                 else:
5746                                                         myprint = "[%s %s] " % (pkgprint(pkg_type), addl)
5747                                                 myprint += indent + pkgprint(pkg_key) + " " + \
5748                                                         myoldbest + darkgreen("to " + myroot)
5749                                 else:
5750                                         if "--columns" in self.myopts:
5751                                                 if "--quiet" in self.myopts:
5752                                                         myprint=addl+" "+indent+pkgprint(pkg_cp)
5753                                                         myprint=myprint+" "+green(xs[1]+xs[2])+" "
5754                                                         myprint=myprint+myoldbest
5755                                                         verboseadd = None
5756                                                 else:
5757                                                         if not pkg_merge:
5758                                                                 myprint = "[%s] %s%s" % \
5759                                                                         (pkgprint(pkg_status.ljust(13)),
5760                                                                         indent, pkgprint(pkg.cp))
5761                                                         else:
5762                                                                 myprint = "[%s %s] %s%s" % \
5763                                                                         (pkgprint(pkg.type_name), addl,
5764                                                                         indent, pkgprint(pkg.cp))
5765                                                         if (newlp-nc_len(myprint)) > 0:
5766                                                                 myprint=myprint+(" "*(newlp-nc_len(myprint)))
5767                                                         myprint=myprint+green(" ["+xs[1]+xs[2]+"] ")
5768                                                         if (oldlp-nc_len(myprint)) > 0:
5769                                                                 myprint=myprint+(" "*(oldlp-nc_len(myprint)))
5770                                                         myprint += myoldbest
5771                                         else:
5772                                                 if not pkg_merge:
5773                                                         myprint = "[%s] %s%s %s" % \
5774                                                                 (pkgprint(pkg_status.ljust(13)),
5775                                                                 indent, pkgprint(pkg.cpv),
5776                                                                 myoldbest)
5777                                                 else:
5778                                                         myprint = "[%s %s] %s%s %s" % \
5779                                                                 (pkgprint(pkg_type), addl, indent,
5780                                                                 pkgprint(pkg.cpv), myoldbest)
5781
5782                                 if columns and pkg.operation == "uninstall":
5783                                         continue
5784                                 p.append((myprint, verboseadd, repoadd))
5785
5786                                 if "--tree" not in self.myopts and \
5787                                         "--quiet" not in self.myopts and \
5788                                         not self._opts_no_restart.intersection(self.myopts) and \
5789                                         pkg.root == self._running_root.root and \
5790                                         portage.match_from_list(
5791                                         portage.const.PORTAGE_PACKAGE_ATOM, [pkg]) and \
5792                                         not vardb.cpv_exists(pkg.cpv) and \
5793                                         "--quiet" not in self.myopts:
5794                                                 if mylist_index < len(mylist) - 1:
5795                                                         p.append(colorize("WARN", "*** Portage will stop merging at this point and reload itself,"))
5796                                                         p.append(colorize("WARN", "    then resume the merge."))
5797
5798                 out = sys.stdout
5799                 show_repos = repoadd_set and repoadd_set != set(["0"])
5800
5801                 for x in p:
5802                         if isinstance(x, basestring):
5803                                 out.write("%s\n" % (x,))
5804                                 continue
5805
5806                         myprint, verboseadd, repoadd = x
5807
5808                         if verboseadd:
5809                                 myprint += " " + verboseadd
5810
5811                         if show_repos and repoadd:
5812                                 myprint += " " + teal("[%s]" % repoadd)
5813
5814                         out.write("%s\n" % (myprint,))
5815
5816                 for x in blockers:
5817                         print x
5818
5819                 if verbosity == 3:
5820                         print
5821                         print counters
5822                         if show_repos:
5823                                 sys.stdout.write(str(repo_display))
5824
5825                 if "--changelog" in self.myopts:
5826                         print
5827                         for revision,text in changelogs:
5828                                 print bold('*'+revision)
5829                                 sys.stdout.write(text)
5830
5831                 sys.stdout.flush()
5832                 return os.EX_OK
5833
5834         def display_problems(self):
5835                 """
5836                 Display problems with the dependency graph such as slot collisions.
5837                 This is called internally by display() to show the problems _after_
5838                 the merge list where it is most likely to be seen, but if display()
5839                 is not going to be called then this method should be called explicitly
5840                 to ensure that the user is notified of problems with the graph.
5841
5842                 All output goes to stderr, except for unsatisfied dependencies which
5843                 go to stdout for parsing by programs such as autounmask.
5844                 """
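                # Assumed usage note: callers that invoke display() get this output
                # automatically after the merge list; callers that skip display()
                # (e.g. when bailing out early) should call display_problems()
                # directly so graph problems are still reported.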
5845
5846                 # Note that show_masked_packages() sends its output to
5847                 # stdout, and some programs such as autounmask parse the
5848                 # output in cases when emerge bails out. However, when
5849                 # show_masked_packages() is called for installed packages
5850                 # here, the message is a warning that is more appropriate
5851                 # to send to stderr, so temporarily redirect stdout to
5852                 # stderr. TODO: Fix output code so there's a cleaner way
5853                 # to redirect everything to stderr.
5854                 sys.stdout.flush()
5855                 sys.stderr.flush()
5856                 stdout = sys.stdout
5857                 try:
5858                         sys.stdout = sys.stderr
5859                         self._display_problems()
5860                 finally:
5861                         sys.stdout = stdout
5862                         sys.stdout.flush()
5863                         sys.stderr.flush()
5864
5865                 # This goes to stdout for parsing by programs like autounmask.
5866                 for pargs, kwargs in self._unsatisfied_deps_for_display:
5867                         self._show_unsatisfied_dep(*pargs, **kwargs)
5868
5869         def _display_problems(self):
5870                 if self._circular_deps_for_display is not None:
5871                         self._show_circular_deps(
5872                                 self._circular_deps_for_display)
5873
5874                 # The user is only notified of a slot conflict if
5875                 # there are no unresolvable blocker conflicts.
5876                 if self._unsatisfied_blockers_for_display is not None:
5877                         self._show_unsatisfied_blockers(
5878                                 self._unsatisfied_blockers_for_display)
5879                 else:
5880                         self._show_slot_collision_notice()
5881
5882                 # TODO: Add generic support for "set problem" handlers so that
5883                 # the below warnings aren't special cases for world only.
5884
5885                 if self._missing_args:
5886                         world_problems = False
5887                         if "world" in self._sets:
5888                                 # Filter out indirect members of world (from nested sets)
5889                                 # since only direct members of world are desired here.
5890                                 world_set = self.roots[self.target_root].sets["world"]
5891                                 for arg, atom in self._missing_args:
5892                                         if arg.name == "world" and atom in world_set:
5893                                                 world_problems = True
5894                                                 break
5895
5896                         if world_problems:
5897                                 sys.stderr.write("\n!!! Problems have been " + \
5898                                         "detected with your world file\n")
5899                                 sys.stderr.write("!!! Please run " + \
5900                                         green("emaint --check world")+"\n\n")
5901
5902                 if self._missing_args:
5903                         sys.stderr.write("\n" + colorize("BAD", "!!!") + \
5904                                 " Ebuilds for the following packages are either all\n")
5905                         sys.stderr.write(colorize("BAD", "!!!") + \
5906                                 " masked or don't exist:\n")
5907                         sys.stderr.write(" ".join(str(atom) for arg, atom in \
5908                                 self._missing_args) + "\n")
5909
5910                 if self._pprovided_args:
5911                         arg_refs = {}
5912                         for arg, atom in self._pprovided_args:
5913                                 if isinstance(arg, SetArg):
5914                                         parent = arg.name
5915                                         arg_atom = (atom, atom)
5916                                 else:
5917                                         parent = "args"
5918                                         arg_atom = (arg.arg, atom)
5919                                 refs = arg_refs.setdefault(arg_atom, [])
5920                                 if parent not in refs:
5921                                         refs.append(parent)
5922                         msg = []
5923                         msg.append(bad("\nWARNING: "))
5924                         if len(self._pprovided_args) > 1:
5925                                 msg.append("Requested packages will not be " + \
5926                                         "merged because they are listed in\n")
5927                         else:
5928                                 msg.append("A requested package will not be " + \
5929                                         "merged because it is listed in\n")
5930                         msg.append("package.provided:\n\n")
5931                         problems_sets = set()
5932                         for (arg, atom), refs in arg_refs.iteritems():
5933                                 ref_string = ""
5934                                 if refs:
5935                                         problems_sets.update(refs)
5936                                         refs.sort()
5937                                         ref_string = ", ".join(["'%s'" % name for name in refs])
5938                                         ref_string = " pulled in by " + ref_string
5939                                 msg.append("  %s%s\n" % (colorize("INFORM", str(arg)), ref_string))
5940                         msg.append("\n")
5941                         if "world" in problems_sets:
5942                                 msg.append("This problem can be solved in one of the following ways:\n\n")
5943                                 msg.append("  A) Use emaint to clean offending packages from world (if not installed).\n")
5944                                 msg.append("  B) Uninstall offending packages (cleans them from world).\n")
5945                                 msg.append("  C) Remove offending entries from package.provided.\n\n")
5946                                 msg.append("The best course of action depends on the reason that an offending\n")
5947                                 msg.append("package.provided entry exists.\n\n")
5948                         sys.stderr.write("".join(msg))
5949
5950                 masked_packages = []
5951                 for pkg in self._masked_installed:
5952                         root_config = pkg.root_config
5953                         pkgsettings = self.pkgsettings[pkg.root]
5954                         mreasons = get_masking_status(pkg, pkgsettings, root_config)
5955                         masked_packages.append((root_config, pkgsettings,
5956                                 pkg.cpv, pkg.metadata, mreasons))
5957                 if masked_packages:
5958                         sys.stderr.write("\n" + colorize("BAD", "!!!") + \
5959                                 " The following installed packages are masked:\n")
5960                         show_masked_packages(masked_packages)
5961                         show_mask_docs()
5962                         print
5963
5964         def calc_changelog(self,ebuildpath,current,next):
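                # Collects the ChangeLog divisions that apply to this upgrade: every
                # release newer than `current`, up to and including `next`. Hypothetical
                # example, assuming the ChangeLog lists newest releases first: upgrading
                # foo from 1.0 to 1.2 returns the entries for 1.2 and 1.1 but not 1.0.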
5965                 if ebuildpath is None or not os.path.exists(ebuildpath):
5966                         return []
5967                 current = '-'.join(portage.catpkgsplit(current)[1:])
5968                 if current.endswith('-r0'):
5969                         current = current[:-3]
5970                 next = '-'.join(portage.catpkgsplit(next)[1:])
5971                 if next.endswith('-r0'):
5972                         next = next[:-3]
5973                 changelogpath = os.path.join(os.path.split(ebuildpath)[0],'ChangeLog')
5974                 try:
5975                         changelog = open(changelogpath).read()
5976                 except SystemExit, e:
5977                         raise # re-raise so that SystemExit is not swallowed by the bare except below
5978                 except:
5979                         return []
5980                 divisions = self.find_changelog_tags(changelog)
5981                 #print 'XX from',current,'to',next
5982                 #for div,text in divisions: print 'XX',div
5983                 # skip entries for all revisions above the one we are about to emerge
5984                 for i in range(len(divisions)):
5985                         if divisions[i][0]==next:
5986                                 divisions = divisions[i:]
5987                                 break
5988                 # find out how many entries we are going to display
5989                 for i in range(len(divisions)):
5990                         if divisions[i][0]==current:
5991                                 divisions = divisions[:i]
5992                                 break
5993                 else:
5994                         # couldn't find the current revision in the list. Display nothing.
5995                         return []
5996                 return divisions
5997
5998         def find_changelog_tags(self,changelog):
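                # Splits a ChangeLog body into (release, entry_text) tuples in the order
                # they appear (ChangeLogs conventionally list the newest release first).
                # Sketch with a hypothetical log:
                #   "*foo-1.2 (01 Jan 2009)\n  entry A\n*foo-1.1 (01 Dec 2008)\n  entry B\n"
                # yields [("foo-1.2", "  entry A\n"), ("foo-1.1", "  entry B\n")].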
5999                 divs = []
6000                 release = None
6001                 while 1:
6002                         match = re.search(r'^\*\ ?([-a-zA-Z0-9_.+]*)(?:\ .*)?\n',changelog,re.M)
6003                         if match is None:
6004                                 if release is not None:
6005                                         divs.append((release,changelog))
6006                                 return divs
6007                         if release is not None:
6008                                 divs.append((release,changelog[:match.start()]))
6009                         changelog = changelog[match.end():]
6010                         release = match.group(1)
6011                         if release.endswith('.ebuild'):
6012                                 release = release[:-7]
6013                         if release.endswith('-r0'):
6014                                 release = release[:-3]
6015
6016         def saveNomergeFavorites(self):
6017                 """Find atoms in favorites that are not in the mergelist and add them
6018                 to the world file if necessary."""
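                # Illustrative effect (hypothetical command): `emerge app-misc/foo` when
                # foo is already installed and up to date yields a "nomerge" node, so the
                # requested atom is still recorded in the world file here, unless one of
                # the options checked below (e.g. --oneshot or --pretend) suppresses it.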
6019                 for x in ("--buildpkgonly", "--fetchonly", "--fetch-all-uri",
6020                         "--oneshot", "--onlydeps", "--pretend"):
6021                         if x in self.myopts:
6022                                 return
6023                 root_config = self.roots[self.target_root]
6024                 world_set = root_config.sets["world"]
6025
6026                 world_locked = False
6027                 if hasattr(world_set, "lock"):
6028                         world_set.lock()
6029                         world_locked = True
6030
6031                 if hasattr(world_set, "load"):
6032                         world_set.load() # maybe it's changed on disk
6033
6034                 args_set = self._sets["args"]
6035                 portdb = self.trees[self.target_root]["porttree"].dbapi
6036                 added_favorites = set()
6037                 for x in self._set_nodes:
6038                         pkg_type, root, pkg_key, pkg_status = x
6039                         if pkg_status != "nomerge":
6040                                 continue
6041
6042                         try:
6043                                 myfavkey = create_world_atom(x, args_set, root_config)
6044                                 if myfavkey:
6045                                         if myfavkey in added_favorites:
6046                                                 continue
6047                                         added_favorites.add(myfavkey)
6048                         except portage.exception.InvalidDependString, e:
6049                                 writemsg("\n\n!!! '%s' has invalid PROVIDE: %s\n" % \
6050                                         (pkg_key, str(e)), noiselevel=-1)
6051                                 writemsg("!!! see '%s'\n\n" % os.path.join(
6052                                         root, portage.VDB_PATH, pkg_key, "PROVIDE"), noiselevel=-1)
6053                                 del e
6054                 all_added = []
6055                 for k in self._sets:
6056                         if k in ("args", "world") or not root_config.sets[k].world_candidate:
6057                                 continue
6058                         s = SETPREFIX + k
6059                         if s in world_set:
6060                                 continue
6061                         all_added.append(SETPREFIX + k)
6062                 all_added.extend(added_favorites)
6063                 all_added.sort()
6064                 for a in all_added:
6065                         print ">>> Recording %s in \"world\" favorites file..." % \
6066                                 colorize("INFORM", str(a))
6067                 if all_added:
6068                         world_set.update(all_added)
6069
6070                 if world_locked:
6071                         world_set.unlock()
6072
6073         def loadResumeCommand(self, resume_data, skip_masked=True,
6074                 skip_missing=True):
6075                 """
6076                 Add a resume command to the graph and validate it in the process.  This
6077                 will raise a PackageNotFound exception if a package is not available.
6078                 """
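                # Shape of resume_data assumed from the accesses below (values are
                # illustrative):
                #   {"mergelist": [["ebuild", "/", "app-misc/foo-1.0", "merge"], ...],
                #    "favorites": ["app-misc/foo"]}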
6079
6080                 if not isinstance(resume_data, dict):
6081                         return False
6082
6083                 mergelist = resume_data.get("mergelist")
6084                 if not isinstance(mergelist, list):
6085                         mergelist = []
6086
6087                 fakedb = self.mydbapi
6088                 trees = self.trees
6089                 serialized_tasks = []
6090                 masked_tasks = []
6091                 for x in mergelist:
6092                         if not (isinstance(x, list) and len(x) == 4):
6093                                 continue
6094                         pkg_type, myroot, pkg_key, action = x
6095                         if pkg_type not in self.pkg_tree_map:
6096                                 continue
6097                         if action != "merge":
6098                                 continue
6099                         tree_type = self.pkg_tree_map[pkg_type]
6100                         mydb = trees[myroot][tree_type].dbapi
6101                         db_keys = list(self._trees_orig[myroot][
6102                                 tree_type].dbapi._aux_cache_keys)
6103                         try:
6104                                 metadata = izip(db_keys, mydb.aux_get(pkg_key, db_keys))
6105                         except KeyError:
6106                                 # It does not exist or it is corrupt.
6107                                 if action == "uninstall":
6108                                         continue
6109                                 if skip_missing:
6110                                         # TODO: log these somewhere
6111                                         continue
6112                                 raise portage.exception.PackageNotFound(pkg_key)
6113                         installed = action == "uninstall"
6114                         built = pkg_type != "ebuild"
6115                         root_config = self.roots[myroot]
6116                         pkg = Package(built=built, cpv=pkg_key,
6117                                 installed=installed, metadata=metadata,
6118                                 operation=action, root_config=root_config,
6119                                 type_name=pkg_type)
6120                         if pkg_type == "ebuild":
6121                                 pkgsettings = self.pkgsettings[myroot]
6122                                 pkgsettings.setcpv(pkg)
6123                                 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
6124                                 pkg.metadata['CHOST'] = pkgsettings.get('CHOST', '')
6125                         self._pkg_cache[pkg] = pkg
6126
6127                         root_config = self.roots[pkg.root]
6128                         if "merge" == pkg.operation and \
6129                                 not visible(root_config.settings, pkg):
6130                                 if skip_masked:
6131                                         masked_tasks.append(Dependency(root=pkg.root, parent=pkg))
6132                                 else:
6133                                         self._unsatisfied_deps_for_display.append(
6134                                                 ((pkg.root, "="+pkg.cpv), {"myparent":None}))
6135
6136                         fakedb[myroot].cpv_inject(pkg)
6137                         serialized_tasks.append(pkg)
6138                         self.spinner.update()
6139
6140                 if self._unsatisfied_deps_for_display:
6141                         return False
6142
6143                 if not serialized_tasks or "--nodeps" in self.myopts:
6144                         self._serialized_tasks_cache = serialized_tasks
6145                         self._scheduler_graph = self.digraph
6146                 else:
6147                         self._select_package = self._select_pkg_from_graph
6148                         self.myparams.add("selective")
6149                         # Always traverse deep dependencies in order to account for
6150                         # potentially unsatisfied dependencies of installed packages.
6151                         # This is necessary for correct --keep-going or --resume operation
6152                         # in case a package from a group of circularly dependent packages
6153                         # fails. In this case, a package which has recently been installed
6154                         # may have an unsatisfied circular dependency (pulled in by
6155                         # PDEPEND, for example). So, even though a package is already
6156                         # installed, it may not have all of its dependencies satisfied, so
6157                         # it may not be usable. If such a package is in the subgraph of
6158                         # deep dependencies of a scheduled build, that build needs to
6159                         # be cancelled. In order for this type of situation to be
6160                         # recognized, deep traversal of dependencies is required.
6161                         self.myparams.add("deep")
6162
6163                         favorites = resume_data.get("favorites")
6164                         args_set = self._sets["args"]
6165                         if isinstance(favorites, list):
6166                                 args = self._load_favorites(favorites)
6167                         else:
6168                                 args = []
6169
6170                         for task in serialized_tasks:
6171                                 if isinstance(task, Package) and \
6172                                         task.operation == "merge":
6173                                         if not self._add_pkg(task, None):
6174                                                 return False
6175
6176                         # Packages for argument atoms need to be explicitly
6177                         # added via _add_pkg() so that they are included in the
6178                         # digraph (needed at least for --tree display).
6179                         for arg in args:
6180                                 for atom in arg.set:
6181                                         pkg, existing_node = self._select_package(
6182                                                 arg.root_config.root, atom)
6183                                         if existing_node is None and \
6184                                                 pkg is not None:
6185                                                 if not self._add_pkg(pkg, Dependency(atom=atom,
6186                                                         root=pkg.root, parent=arg)):
6187                                                         return False
6188
6189                         # Allow unsatisfied deps here to avoid showing a masking
6190                         # message for an unsatisfied dep that isn't necessarily
6191                         # masked.
6192                         if not self._create_graph(allow_unsatisfied=True):
6193                                 return False
6194
6195                         unsatisfied_deps = []
6196                         for dep in self._unsatisfied_deps:
6197                                 if not isinstance(dep.parent, Package):
6198                                         continue
6199                                 if dep.parent.operation == "merge":
6200                                         unsatisfied_deps.append(dep)
6201                                         continue
6202
6203                                 # For unsatisfied deps of installed packages, only account for
6204                                 # them if they are in the subgraph of dependencies of a package
6205                                 # which is scheduled to be installed.
6206                                 unsatisfied_install = False
6207                                 traversed = set()
6208                                 dep_stack = self.digraph.parent_nodes(dep.parent)
6209                                 while dep_stack:
6210                                         node = dep_stack.pop()
6211                                         if not isinstance(node, Package):
6212                                                 continue
6213                                         if node.operation == "merge":
6214                                                 unsatisfied_install = True
6215                                                 break
6216                                         if node in traversed:
6217                                                 continue
6218                                         traversed.add(node)
6219                                         dep_stack.extend(self.digraph.parent_nodes(node))
6220
6221                                 if unsatisfied_install:
6222                                         unsatisfied_deps.append(dep)
6223
6224                         if masked_tasks or unsatisfied_deps:
6225                                 # This probably means that a required package
6226                                 # was dropped via --skipfirst. It makes the
6227                                 # resume list invalid, so convert it to a
6228                                 # UnsatisfiedResumeDep exception.
6229                                 raise self.UnsatisfiedResumeDep(self,
6230                                         masked_tasks + unsatisfied_deps)
6231                         self._serialized_tasks_cache = None
6232                         try:
6233                                 self.altlist()
6234                         except self._unknown_internal_error:
6235                                 return False
6236
6237                 return True
6238
6239         def _load_favorites(self, favorites):
6240                 """
6241                 Use a list of favorites to resume state from a
6242                 previous select_files() call. This creates similar
6243                 DependencyArg instances to those that would have
6244                 been created by the original select_files() call.
6245                 This allows Package instances to be matched with
6246                 DependencyArg instances during graph creation.
6247                 """
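                # Sketch with hypothetical favorites ["world", "app-misc/foo"]: the set
                # name becomes a SetArg wrapping the recursively expanded world set
                # (unless that set is already tracked in self._sets), and the atom
                # becomes an AtomArg; bare "system"/"world" strings are first given
                # their SETPREFIX form.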
6248                 root_config = self.roots[self.target_root]
6249                 getSetAtoms = root_config.setconfig.getSetAtoms
6250                 sets = root_config.sets
6251                 args = []
6252                 for x in favorites:
6253                         if not isinstance(x, basestring):
6254                                 continue
6255                         if x in ("system", "world"):
6256                                 x = SETPREFIX + x
6257                         if x.startswith(SETPREFIX):
6258                                 s = x[len(SETPREFIX):]
6259                                 if s not in sets:
6260                                         continue
6261                                 if s in self._sets:
6262                                         continue
6263                                 # Recursively expand sets so that containment tests in
6264                                 # self._get_parent_sets() properly match atoms in nested
6265                                 # sets (like if world contains system).
6266                                 expanded_set = InternalPackageSet(
6267                                         initial_atoms=getSetAtoms(s))
6268                                 self._sets[s] = expanded_set
6269                                 args.append(SetArg(arg=x, set=expanded_set,
6270                                         root_config=root_config))
6271                         else:
6272                                 if not portage.isvalidatom(x):
6273                                         continue
6274                                 args.append(AtomArg(arg=x, atom=x,
6275                                         root_config=root_config))
6276
6277                 self._set_args(args)
6278                 return args
6279
6280         class UnsatisfiedResumeDep(portage.exception.PortageException):
6281                 """
6282                 A dependency of a resume list is not installed. This
6283                 can occur when a required package is dropped from the
6284                 merge list via --skipfirst.
6285                 """
6286                 def __init__(self, depgraph, value):
6287                         portage.exception.PortageException.__init__(self, value)
6288                         self.depgraph = depgraph
6289
6290         class _internal_exception(portage.exception.PortageException):
6291                 def __init__(self, value=""):
6292                         portage.exception.PortageException.__init__(self, value)
6293
6294         class _unknown_internal_error(_internal_exception):
6295                 """
6296                 Used by the depgraph internally to terminate graph creation.
6297                 The specific reason for the failure should have been dumped
6298                 to stderr, unfortunately, the exact reason for the failure
6299                 to stderr; unfortunately, the exact reason for the failure
6300                 """
6301
6302         class _serialize_tasks_retry(_internal_exception):
6303                 """
6304                 This is raised by the _serialize_tasks() method when it needs to
6305                 be called again for some reason. The only case that it's currently
6306                 used for is when neglected dependencies need to be added to the
6307                 graph in order to avoid making a potentially unsafe decision.
6308                 """
6309
6310         class _dep_check_composite_db(portage.dbapi):
6311                 """
6312                 A dbapi-like interface that is optimized for use in dep_check() calls.
6313                 This is built on top of the existing depgraph package selection logic.
6314                 Some packages that have been added to the graph may be masked from this
6315                 view in order to influence the atom preference selection that occurs
6316                 via dep_check().
6317                 """
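                # Assumed interaction sketch: dep_check() calls match() with an atom
                # such as "dev-lang/python" and receives the visible cpvs chosen by
                # the depgraph; aux_get() then serves metadata for those cpvs from
                # the _cpv_pkg_map populated by match().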
6318                 def __init__(self, depgraph, root):
6319                         portage.dbapi.__init__(self)
6320                         self._depgraph = depgraph
6321                         self._root = root
6322                         self._match_cache = {}
6323                         self._cpv_pkg_map = {}
6324
6325                 def _clear_cache(self):
6326                         self._match_cache.clear()
6327                         self._cpv_pkg_map.clear()
6328
6329                 def match(self, atom):
6330                         ret = self._match_cache.get(atom)
6331                         if ret is not None:
6332                                 return ret[:]
6333                         orig_atom = atom
6334                         if "/" not in atom:
6335                                 atom = self._dep_expand(atom)
6336                         pkg, existing = self._depgraph._select_package(self._root, atom)
6337                         if not pkg:
6338                                 ret = []
6339                         else:
6340                                 # Return the highest available from select_package() as well as
6341                                 # any matching slots in the graph db.
6342                                 slots = set()
6343                                 slots.add(pkg.metadata["SLOT"])
6344                                 atom_cp = portage.dep_getkey(atom)
6345                                 if pkg.cp.startswith("virtual/"):
6346                                         # For new-style virtual lookahead that occurs inside
6347                                         # dep_check(), examine all slots. This is needed
6348                                         # so that newer slots will not unnecessarily be pulled in
6349                                         # when a satisfying lower slot is already installed. For
6350                                         # example, if virtual/jdk-1.4 is satisfied via kaffe then
6351                                         # there's no need to pull in a newer slot to satisfy a
6352                                         # virtual/jdk dependency.
6353                                         for db, pkg_type, built, installed, db_keys in \
6354                                                 self._depgraph._filtered_trees[self._root]["dbs"]:
6355                                                 for cpv in db.match(atom):
6356                                                         if portage.cpv_getkey(cpv) != pkg.cp:
6357                                                                 continue
6358                                                         slots.add(db.aux_get(cpv, ["SLOT"])[0])
6359                                 ret = []
6360                                 if self._visible(pkg):
6361                                         self._cpv_pkg_map[pkg.cpv] = pkg
6362                                         ret.append(pkg.cpv)
6363                                 slots.remove(pkg.metadata["SLOT"])
6364                                 while slots:
6365                                         slot_atom = "%s:%s" % (atom_cp, slots.pop())
6366                                         pkg, existing = self._depgraph._select_package(
6367                                                 self._root, slot_atom)
6368                                         if not pkg:
6369                                                 continue
6370                                         if not self._visible(pkg):
6371                                                 continue
6372                                         self._cpv_pkg_map[pkg.cpv] = pkg
6373                                         ret.append(pkg.cpv)
6374                                 if ret:
6375                                         self._cpv_sort_ascending(ret)
6376                         self._match_cache[orig_atom] = ret
6377                         return ret[:]
6378
6379                 def _visible(self, pkg):
6380                         if pkg.installed and "selective" not in self._depgraph.myparams:
6381                                 try:
6382                                         arg = self._depgraph._iter_atoms_for_pkg(pkg).next()
6383                                 except (StopIteration, portage.exception.InvalidDependString):
6384                                         arg = None
6385                                 if arg:
6386                                         return False
6387                         if pkg.installed:
6388                                 try:
6389                                         if not visible(
6390                                                 self._depgraph.pkgsettings[pkg.root], pkg):
6391                                                 return False
6392                                 except portage.exception.InvalidDependString:
6393                                         pass
6394                         in_graph = self._depgraph._slot_pkg_map[
6395                                 self._root].get(pkg.slot_atom)
6396                         if in_graph is None:
6397                                 # Mask choices for packages which are not the highest visible
6398                                 # version within their slot (since they usually trigger slot
6399                                 # conflicts).
6400                                 highest_visible, in_graph = self._depgraph._select_package(
6401                                         self._root, pkg.slot_atom)
6402                                 if pkg != highest_visible:
6403                                         return False
6404                         elif in_graph != pkg:
6405                                 # Mask choices for packages that would trigger a slot
6406                                 # conflict with a previously selected package.
6407                                 return False
6408                         return True
6409
6410                 def _dep_expand(self, atom):
6411                         """
6412                         This is only needed for old installed packages that may
6413                         contain atoms that are not fully qualified with a specific
6414                         category. Emulate the cpv_expand() function that's used by
6415                         dbapi.match() in cases like this. If there are multiple
6416                         matches, it's often due to a new-style virtual that has
6417                         been added, so try to filter those out to avoid raising
6418                         a ValueError.
6419                         """
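                        # Hypothetical example: "foo" might expand to "app-misc/foo". When
                        # the expansion is ambiguous, atoms outside the virtual/ category are
                        # preferred if exactly one remains; with no match at all, the atom is
                        # given a "virtual/" category if the profile provides such a virtual,
                        # otherwise "null/".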
6420                         root_config = self._depgraph.roots[self._root]
6421                         orig_atom = atom
6422                         expanded_atoms = self._depgraph._dep_expand(root_config, atom)
6423                         if len(expanded_atoms) > 1:
6424                                 non_virtual_atoms = []
6425                                 for x in expanded_atoms:
6426                                         if not portage.dep_getkey(x).startswith("virtual/"):
6427                                                 non_virtual_atoms.append(x)
6428                                 if len(non_virtual_atoms) == 1:
6429                                         expanded_atoms = non_virtual_atoms
6430                         if len(expanded_atoms) > 1:
6431                                 # compatible with portage.cpv_expand()
6432                                 raise portage.exception.AmbiguousPackageName(
6433                                         [portage.dep_getkey(x) for x in expanded_atoms])
6434                         if expanded_atoms:
6435                                 atom = expanded_atoms[0]
6436                         else:
6437                                 null_atom = insert_category_into_atom(atom, "null")
6438                                 null_cp = portage.dep_getkey(null_atom)
6439                                 cat, atom_pn = portage.catsplit(null_cp)
6440                                 virts_p = root_config.settings.get_virts_p().get(atom_pn)
6441                                 if virts_p:
6442                                         # Allow the resolver to choose which virtual.
6443                                         atom = insert_category_into_atom(atom, "virtual")
6444                                 else:
6445                                         atom = insert_category_into_atom(atom, "null")
6446                         return atom
6447
6448                 def aux_get(self, cpv, wants):
6449                         metadata = self._cpv_pkg_map[cpv].metadata
6450                         return [metadata.get(x, "") for x in wants]
6451
6452 class PackageCounters(object):
6453
6454         def __init__(self):
6455                 self.upgrades   = 0
6456                 self.downgrades = 0
6457                 self.new        = 0
6458                 self.newslot    = 0
6459                 self.reinst     = 0
6460                 self.uninst     = 0
6461                 self.blocks     = 0
6462                 self.blocks_satisfied         = 0
6463                 self.totalsize  = 0
6464                 self.restrict_fetch           = 0
6465                 self.restrict_fetch_satisfied = 0
6466                 self.interactive              = 0
6467
6468         def __str__(self):
6469                 total_installs = self.upgrades + self.downgrades + self.newslot + self.new + self.reinst
6470                 myoutput = []
6471                 details = []
6472                 myoutput.append("Total: %s package" % total_installs)
6473                 if total_installs != 1:
6474                         myoutput.append("s")
6475                 if total_installs != 0:
6476                         myoutput.append(" (")
6477                 if self.upgrades > 0:
6478                         details.append("%s upgrade" % self.upgrades)
6479                         if self.upgrades > 1:
6480                                 details[-1] += "s"
6481                 if self.downgrades > 0:
6482                         details.append("%s downgrade" % self.downgrades)
6483                         if self.downgrades > 1:
6484                                 details[-1] += "s"
6485                 if self.new > 0:
6486                         details.append("%s new" % self.new)
6487                 if self.newslot > 0:
6488                         details.append("%s in new slot" % self.newslot)
6489                         if self.newslot > 1:
6490                                 details[-1] += "s"
6491                 if self.reinst > 0:
6492                         details.append("%s reinstall" % self.reinst)
6493                         if self.reinst > 1:
6494                                 details[-1] += "s"
6495                 if self.uninst > 0:
6496                         details.append("%s uninstall" % self.uninst)
6497                         if self.uninst > 1:
6498                                 details[-1] += "s"
6499                 if self.interactive > 0:
6500                         details.append("%s %s" % (self.interactive,
6501                                 colorize("WARN", "interactive")))
6502                 myoutput.append(", ".join(details))
6503                 if total_installs != 0:
6504                         myoutput.append(")")
6505                 myoutput.append(", Size of downloads: %s" % format_size(self.totalsize))
6506                 if self.restrict_fetch:
6507                         myoutput.append("\nFetch Restriction: %s package" % \
6508                                 self.restrict_fetch)
6509                         if self.restrict_fetch > 1:
6510                                 myoutput.append("s")
6511                 if self.restrict_fetch_satisfied < self.restrict_fetch:
6512                         myoutput.append(bad(" (%s unsatisfied)") % \
6513                                 (self.restrict_fetch - self.restrict_fetch_satisfied))
6514                 if self.blocks > 0:
6515                         myoutput.append("\nConflict: %s block" % \
6516                                 self.blocks)
6517                         if self.blocks > 1:
6518                                 myoutput.append("s")
6519                         if self.blocks_satisfied < self.blocks:
6520                                 myoutput.append(bad(" (%s unsatisfied)") % \
6521                                         (self.blocks - self.blocks_satisfied))
6522                 return "".join(myoutput)
6523
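# Illustrative sketch only (not used by emerge itself): how PackageCounters
# is typically populated and rendered. The counts below are arbitrary
# example values.
def _example_package_counters():
        """
        Accumulate a few counts and render the one-line summary, which comes
        out roughly as:
        "Total: 3 packages (2 upgrades, 1 new), Size of downloads: <size>"
        """
        counters = PackageCounters()
        counters.upgrades = 2
        counters.new = 1
        counters.totalsize = 1024 * 1024
        return str(counters)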
6524
6525 _can_poll_device = None
6526
6527 def can_poll_device():
6528         """
6529         Test if it's possible to use poll() on a device such as a pty. This
6530         is known to fail on Darwin.
6531         @rtype: bool
6532         @returns: True if poll() on a device succeeds, False otherwise.
6533         """
6534
6535         global _can_poll_device
6536         if _can_poll_device is not None:
6537                 return _can_poll_device
6538
6539         if not hasattr(select, "poll"):
6540                 _can_poll_device = False
6541                 return _can_poll_device
6542
6543         try:
6544                 dev_null = open('/dev/null', 'rb')
6545         except IOError:
6546                 _can_poll_device = False
6547                 return _can_poll_device
6548
6549         p = select.poll()
6550         p.register(dev_null.fileno(), PollConstants.POLLIN)
6551
6552         invalid_request = False
6553         for f, event in p.poll():
6554                 if event & PollConstants.POLLNVAL:
6555                         invalid_request = True
6556                         break
6557         dev_null.close()
6558
6559         _can_poll_device = not invalid_request
6560         return _can_poll_device
6561
6562 def create_poll_instance():
6563         """
6564         Create an instance of select.poll, or an instance of
6565         PollSelectAdapter there is no poll() implementation or
6566         PollSelectAdapter if there is no poll() implementation or
6567         """
6568         if can_poll_device():
6569                 return select.poll()
6570         return PollSelectAdapter()
6571
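# Illustrative sketch only (not part of the emerge API): how the poll
# abstraction above is typically exercised. The fd argument is assumed to be
# any readable file descriptor supplied by the caller.
def _example_poll_usage(fd):
        """
        Register fd, do one non-blocking poll() pass, and unregister it,
        returning whatever (fd, event) pairs were reported.
        """
        poll_obj = create_poll_instance()
        poll_obj.register(fd, PollConstants.POLLIN)
        events = poll_obj.poll(0)
        poll_obj.unregister(fd)
        return events
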
6572 getloadavg = getattr(os, "getloadavg", None)
6573 if getloadavg is None:
6574         def getloadavg():
6575                 """
6576                 Uses /proc/loadavg to emulate os.getloadavg().
6577                 Raises OSError if the load average was unobtainable.
6578                 """
6579                 try:
6580                         loadavg_str = open('/proc/loadavg').readline()
6581                 except IOError:
6582                         # getloadavg() is only supposed to raise OSError, so convert
6583                         raise OSError('unknown')
6584                 loadavg_split = loadavg_str.split()
6585                 if len(loadavg_split) < 3:
6586                         raise OSError('unknown')
6587                 loadavg_floats = []
6588                 for i in xrange(3):
6589                         try:
6590                                 loadavg_floats.append(float(loadavg_split[i]))
6591                         except ValueError:
6592                                 raise OSError('unknown')
6593                 return tuple(loadavg_floats)
6594
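# Illustrative sketch only: mirrors the way the schedulers below consult
# getloadavg() before starting additional jobs. The max_load threshold is an
# arbitrary caller-supplied value, analogous to --load-average.
def _example_load_check(max_load):
        """
        Return True if the 1-minute load average is below max_load, or
        False if it is too high or cannot be determined.
        """
        try:
                avg1, avg5, avg15 = getloadavg()
        except OSError:
                return False
        return avg1 < max_load
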
6595 class PollScheduler(object):
6596
6597         class _sched_iface_class(SlotObject):
6598                 __slots__ = ("register", "schedule", "unregister")
6599
6600         def __init__(self):
6601                 self._max_jobs = 1
6602                 self._max_load = None
6603                 self._jobs = 0
6604                 self._poll_event_queue = []
6605                 self._poll_event_handlers = {}
6606                 self._poll_event_handler_ids = {}
6607                 # Increment id for each new handler.
6608                 self._event_handler_id = 0
6609                 self._poll_obj = create_poll_instance()
6610                 self._scheduling = False
6611
6612         def _schedule(self):
6613                 """
6614                 Calls _schedule_tasks() and automatically returns early from
6615                 any recursive calls to this method that the _schedule_tasks()
6616                 call might trigger. This makes _schedule() safe to call from
6617                 inside exit listeners.
6618                 """
6619                 if self._scheduling:
6620                         return False
6621                 self._scheduling = True
6622                 try:
6623                         return self._schedule_tasks()
6624                 finally:
6625                         self._scheduling = False
6626
6627         def _running_job_count(self):
6628                 return self._jobs
6629
6630         def _can_add_job(self):
6631                 max_jobs = self._max_jobs
6632                 max_load = self._max_load
6633
6634                 if self._max_jobs is not True and \
6635                         self._running_job_count() >= self._max_jobs:
6636                         return False
6637
6638                 if max_load is not None and \
6639                         (max_jobs is True or max_jobs > 1) and \
6640                         self._running_job_count() >= 1:
6641                         try:
6642                                 avg1, avg5, avg15 = getloadavg()
6643                         except OSError:
6644                                 return False
6645
6646                         if avg1 >= max_load:
6647                                 return False
6648
6649                 return True
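                # For example (illustrative): with --jobs=4 and
                # --load-average=3.0, additional jobs are started only while
                # the 1-minute load average reported by getloadavg() stays
                # below 3.0; the load check is skipped while no job is
                # running, so the first job can always start.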
6650
6651         def _poll(self, timeout=None):
6652                 """
6653                 All poll() calls pass through here. The poll events
6654                 are added directly to self._poll_event_queue.
6655                 In order to avoid endless blocking, this raises
6656                 StopIteration if timeout is None and there are
6657                 no file descriptors to poll.
6658                 """
6659                 if not self._poll_event_handlers:
6660                         self._schedule()
6661                         if timeout is None and \
6662                                 not self._poll_event_handlers:
6663                                 raise StopIteration(
6664                                         "timeout is None and there are no poll() event handlers")
6665
6666                 # The following error is known to occur with Linux kernel versions
6667                 # less than 2.6.24:
6668                 #
6669                 #   select.error: (4, 'Interrupted system call')
6670                 #
6671                 # This error has been observed after a SIGSTOP, followed by SIGCONT.
6672                 # Treat it similar to EAGAIN if timeout is None, otherwise just return
6673                 # without any events.
6674                 while True:
6675                         try:
6676                                 self._poll_event_queue.extend(self._poll_obj.poll(timeout))
6677                                 break
6678                         except select.error, e:
6679                                 writemsg_level("\n!!! select error: %s\n" % (e,),
6680                                         level=logging.ERROR, noiselevel=-1)
6681                                 del e
6682                                 if timeout is not None:
6683                                         break
6684
6685         def _next_poll_event(self, timeout=None):
6686                 """
6687                 Since the _schedule_wait() loop is called by event
6688                 handlers from _poll_loop(), maintain a central event
6689                 queue for both of them to share events from a single
6690                 poll() call. In order to avoid endless blocking, this
6691                 raises StopIteration if timeout is None and there are
6692                 no file descriptors to poll.
6693                 """
6694                 if not self._poll_event_queue:
6695                         self._poll(timeout)
6696                 return self._poll_event_queue.pop()
6697
6698         def _poll_loop(self):
6699
6700                 event_handlers = self._poll_event_handlers
6701                 event_handled = False
6702
6703                 try:
6704                         while event_handlers:
6705                                 f, event = self._next_poll_event()
6706                                 handler, reg_id = event_handlers[f]
6707                                 handler(f, event)
6708                                 event_handled = True
6709                 except StopIteration:
6710                         event_handled = True
6711
6712                 if not event_handled:
6713                         raise AssertionError("tight loop")
6714
6715         def _schedule_yield(self):
6716                 """
6717                 Schedule for a short period of time chosen by the scheduler based
6718                 on internal state. Synchronous tasks should call this periodically
6719                 in order to allow the scheduler to service pending poll events. The
6720                 scheduler will call poll() exactly once, without blocking, and any
6721                 resulting poll events will be serviced.
6722                 """
6723                 event_handlers = self._poll_event_handlers
6724                 events_handled = 0
6725
6726                 if not event_handlers:
6727                         return bool(events_handled)
6728
6729                 if not self._poll_event_queue:
6730                         self._poll(0)
6731
6732                 try:
6733                         while event_handlers and self._poll_event_queue:
6734                                 f, event = self._next_poll_event()
6735                                 handler, reg_id = event_handlers[f]
6736                                 handler(f, event)
6737                                 events_handled += 1
6738                 except StopIteration:
6739                         events_handled += 1
6740
6741                 return bool(events_handled)
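                # Illustrative usage: a long-running synchronous task calls
                # this periodically (it is exposed as the scheduleYield
                # member of the Scheduler interface defined below) so that
                # poll handlers for other running jobs keep being serviced.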
6742
6743         def _register(self, f, eventmask, handler):
6744                 """
6745                 @rtype: Integer
6746                 @return: A unique registration id, for use in schedule() or
6747                         unregister() calls.
6748                 """
6749                 if f in self._poll_event_handlers:
6750                         raise AssertionError("fd %d is already registered" % f)
6751                 self._event_handler_id += 1
6752                 reg_id = self._event_handler_id
6753                 self._poll_event_handler_ids[reg_id] = f
6754                 self._poll_event_handlers[f] = (handler, reg_id)
6755                 self._poll_obj.register(f, eventmask)
6756                 return reg_id
6757
6758         def _unregister(self, reg_id):
6759                 f = self._poll_event_handler_ids[reg_id]
6760                 self._poll_obj.unregister(f)
6761                 del self._poll_event_handlers[f]
6762                 del self._poll_event_handler_ids[reg_id]
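                # Typical lifecycle (illustrative): a task obtains
                # reg_id = self._register(fd, PollConstants.POLLIN, handler),
                # the handler is then invoked as handler(fd, event) for each
                # poll() event on fd, and the task finally calls
                # self._unregister(reg_id) once fd is exhausted or closed.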
6763
6764         def _schedule_wait(self, wait_ids):
6765                 """
6766                 Schedule until the given wait_ids are no longer registered
6767                 for poll() events.
6768                 @type wait_ids: int or collection of ints
6769                 @param wait_ids: one or more registration ids (as returned
6769                         by _register) to wait for
6770                 """
6771                 event_handlers = self._poll_event_handlers
6772                 handler_ids = self._poll_event_handler_ids
6773                 event_handled = False
6774
6775                 if isinstance(wait_ids, int):
6776                         wait_ids = frozenset([wait_ids])
6777
6778                 try:
6779                         while wait_ids.intersection(handler_ids):
6780                                 f, event = self._next_poll_event()
6781                                 handler, reg_id = event_handlers[f]
6782                                 handler(f, event)
6783                                 event_handled = True
6784                 except StopIteration:
6785                         event_handled = True
6786
6787                 return event_handled
6788
6789 class QueueScheduler(PollScheduler):
6790
6791         """
6792         Add instances of SequentialTaskQueue and then call run(). The
6793         run() method returns when no tasks remain.
6794         """
6795
6796         def __init__(self, max_jobs=None, max_load=None):
6797                 PollScheduler.__init__(self)
6798
6799                 if max_jobs is None:
6800                         max_jobs = 1
6801
6802                 self._max_jobs = max_jobs
6803                 self._max_load = max_load
6804                 self.sched_iface = self._sched_iface_class(
6805                         register=self._register,
6806                         schedule=self._schedule_wait,
6807                         unregister=self._unregister)
6808
6809                 self._queues = []
6810                 self._schedule_listeners = []
6811
6812         def add(self, q):
6813                 self._queues.append(q)
6814
6815         def remove(self, q):
6816                 self._queues.remove(q)
6817
6818         def run(self):
6819
6820                 while self._schedule():
6821                         self._poll_loop()
6822
6823                 while self._running_job_count():
6824                         self._poll_loop()
6825
6826         def _schedule_tasks(self):
6827                 """
6828                 @rtype: bool
6829                 @returns: True if there may be remaining tasks to schedule,
6830                         False otherwise.
6831                 """
6832                 while self._can_add_job():
6833                         n = self._max_jobs - self._running_job_count()
6834                         if n < 1:
6835                                 break
6836
6837                         if not self._start_next_job(n):
6838                                 return False
6839
6840                 for q in self._queues:
6841                         if q:
6842                                 return True
6843                 return False
6844
6845         def _running_job_count(self):
6846                 job_count = 0
6847                 for q in self._queues:
6848                         job_count += len(q.running_tasks)
6849                 self._jobs = job_count
6850                 return job_count
6851
6852         def _start_next_job(self, n=1):
6853                 started_count = 0
6854                 for q in self._queues:
6855                         initial_job_count = len(q.running_tasks)
6856                         q.schedule()
6857                         final_job_count = len(q.running_tasks)
6858                         if final_job_count > initial_job_count:
6859                                 started_count += (final_job_count - initial_job_count)
6860                         if started_count >= n:
6861                                 break
6862                 return started_count
6863
6864 class TaskScheduler(object):
6865
6866         """
6867         A simple way to handle scheduling of AsynchronousTask instances. Simply
6868         add tasks and call run(). The run() method returns when no tasks remain.
6869         """
6870
6871         def __init__(self, max_jobs=None, max_load=None):
6872                 self._queue = SequentialTaskQueue(max_jobs=max_jobs)
6873                 self._scheduler = QueueScheduler(
6874                         max_jobs=max_jobs, max_load=max_load)
6875                 self.sched_iface = self._scheduler.sched_iface
6876                 self.run = self._scheduler.run
6877                 self._scheduler.add(self._queue)
6878
6879         def add(self, task):
6880                 self._queue.add(task)
6881
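# Illustrative sketch only: the usage pattern described in the TaskScheduler
# docstring above. The tasks argument is assumed to be an iterable of
# AsynchronousTask instances created by the caller.
def _example_run_tasks(tasks, max_jobs=2):
        """
        Queue the given tasks and block until they have all completed.
        """
        task_scheduler = TaskScheduler(max_jobs=max_jobs)
        for task in tasks:
                task_scheduler.add(task)
        task_scheduler.run()
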
6882 class JobStatusDisplay(object):
6883
6884         _bound_properties = ("curval", "failed", "running")
6885         _jobs_column_width = 48
6886
6887         # Don't update the display unless at least this much
6888         # time has passed, in units of seconds.
6889         _min_display_latency = 2
6890
6891         _default_term_codes = {
6892                 'cr'  : '\r',
6893                 'el'  : '\x1b[K',
6894                 'nel' : '\n',
6895         }
6896
6897         _termcap_name_map = {
6898                 'carriage_return' : 'cr',
6899                 'clr_eol'         : 'el',
6900                 'newline'         : 'nel',
6901         }
6902
6903         def __init__(self, out=sys.stdout, quiet=False, xterm_titles=True):
6904                 object.__setattr__(self, "out", out)
6905                 object.__setattr__(self, "quiet", quiet)
6906                 object.__setattr__(self, "xterm_titles", xterm_titles)
6907                 object.__setattr__(self, "maxval", 0)
6908                 object.__setattr__(self, "merges", 0)
6909                 object.__setattr__(self, "_changed", False)
6910                 object.__setattr__(self, "_displayed", False)
6911                 object.__setattr__(self, "_last_display_time", 0)
6912                 object.__setattr__(self, "width", 80)
6913                 self.reset()
6914
6915                 isatty = hasattr(out, "isatty") and out.isatty()
6916                 object.__setattr__(self, "_isatty", isatty)
6917                 if not isatty or not self._init_term():
6918                         term_codes = {}
6919                         for k, capname in self._termcap_name_map.iteritems():
6920                                 term_codes[k] = self._default_term_codes[capname]
6921                         object.__setattr__(self, "_term_codes", term_codes)
6922                 encoding = sys.getdefaultencoding()
6923                 for k, v in self._term_codes.items():
6924                         if not isinstance(v, basestring):
6925                                 self._term_codes[k] = v.decode(encoding, 'replace')
6926
6927         def _init_term(self):
6928                 """
6929                 Initialize term control codes.
6930                 @rtype: bool
6931                 @returns: True if term codes were successfully initialized,
6932                         False otherwise.
6933                 """
6934
6935                 term_type = os.environ.get("TERM", "vt100")
6936                 tigetstr = None
6937
6938                 try:
6939                         import curses
6940                         try:
6941                                 curses.setupterm(term_type, self.out.fileno())
6942                                 tigetstr = curses.tigetstr
6943                         except curses.error:
6944                                 pass
6945                 except ImportError:
6946                         pass
6947
6948                 if tigetstr is None:
6949                         return False
6950
6951                 term_codes = {}
6952                 for k, capname in self._termcap_name_map.iteritems():
6953                         code = tigetstr(capname)
6954                         if code is None:
6955                                 code = self._default_term_codes[capname]
6956                         term_codes[k] = code
6957                 object.__setattr__(self, "_term_codes", term_codes)
6958                 return True
6959
6960         def _format_msg(self, msg):
6961                 return ">>> %s" % msg
6962
6963         def _erase(self):
6964                 self.out.write(
6965                         self._term_codes['carriage_return'] + \
6966                         self._term_codes['clr_eol'])
6967                 self.out.flush()
6968                 self._displayed = False
6969
6970         def _display(self, line):
6971                 self.out.write(line)
6972                 self.out.flush()
6973                 self._displayed = True
6974
6975         def _update(self, msg):
6976
6977                 out = self.out
6978                 if not self._isatty:
6979                         out.write(self._format_msg(msg) + self._term_codes['newline'])
6980                         self.out.flush()
6981                         self._displayed = True
6982                         return
6983
6984                 if self._displayed:
6985                         self._erase()
6986
6987                 self._display(self._format_msg(msg))
6988
6989         def displayMessage(self, msg):
6990
6991                 was_displayed = self._displayed
6992
6993                 if self._isatty and self._displayed:
6994                         self._erase()
6995
6996                 self.out.write(self._format_msg(msg) + self._term_codes['newline'])
6997                 self.out.flush()
6998                 self._displayed = False
6999
7000                 if was_displayed:
7001                         self._changed = True
7002                         self.display()
7003
7004         def reset(self):
7005                 self.maxval = 0
7006                 self.merges = 0
7007                 for name in self._bound_properties:
7008                         object.__setattr__(self, name, 0)
7009
7010                 if self._displayed:
7011                         self.out.write(self._term_codes['newline'])
7012                         self.out.flush()
7013                         self._displayed = False
7014
7015         def __setattr__(self, name, value):
7016                 old_value = getattr(self, name)
7017                 if value == old_value:
7018                         return
7019                 object.__setattr__(self, name, value)
7020                 if name in self._bound_properties:
7021                         self._property_change(name, old_value, value)
7022
7023         def _property_change(self, name, old_value, new_value):
7024                 self._changed = True
7025                 self.display()
7026
7027         def _load_avg_str(self):
7028                 try:
7029                         avg = getloadavg()
7030                 except OSError:
7031                         return 'unknown'
7032
7033                 max_avg = max(avg)
7034
7035                 if max_avg < 10:
7036                         digits = 2
7037                 elif max_avg < 100:
7038                         digits = 1
7039                 else:
7040                         digits = 0
7041
7042                 return ", ".join(("%%.%df" % digits ) % x for x in avg)
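                # For example (illustrative): (0.53, 0.47, 0.31) is rendered
                # as "0.53, 0.47, 0.31", while (112.4, 98.0, 77.2) drops to
                # zero decimal places and is rendered as "112, 98, 77".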
7043
7044         def display(self):
7045                 """
7046                 Display status on stdout, but only if something has
7047                 changed since the last call.
7048                 """
7049
7050                 if self.quiet:
7051                         return
7052
7053                 current_time = time.time()
7054                 time_delta = current_time - self._last_display_time
7055                 if self._displayed and \
7056                         not self._changed:
7057                         if not self._isatty:
7058                                 return
7059                         if time_delta < self._min_display_latency:
7060                                 return
7061
7062                 self._last_display_time = current_time
7063                 self._changed = False
7064                 self._display_status()
7065
7066         def _display_status(self):
7067                 # Don't use len(self._completed_tasks) here since that also
7068                 # can include uninstall tasks.
7069                 curval_str = str(self.curval)
7070                 maxval_str = str(self.maxval)
7071                 running_str = str(self.running)
7072                 failed_str = str(self.failed)
7073                 load_avg_str = self._load_avg_str()
7074
7075                 color_output = StringIO()
7076                 plain_output = StringIO()
7077                 style_file = portage.output.ConsoleStyleFile(color_output)
7078                 style_file.write_listener = plain_output
7079                 style_writer = portage.output.StyleWriter(file=style_file, maxcol=9999)
7080                 style_writer.style_listener = style_file.new_styles
7081                 f = formatter.AbstractFormatter(style_writer)
7082
7083                 number_style = "INFORM"
7084                 f.add_literal_data("Jobs: ")
7085                 f.push_style(number_style)
7086                 f.add_literal_data(curval_str)
7087                 f.pop_style()
7088                 f.add_literal_data(" of ")
7089                 f.push_style(number_style)
7090                 f.add_literal_data(maxval_str)
7091                 f.pop_style()
7092                 f.add_literal_data(" complete")
7093
7094                 if self.running:
7095                         f.add_literal_data(", ")
7096                         f.push_style(number_style)
7097                         f.add_literal_data(running_str)
7098                         f.pop_style()
7099                         f.add_literal_data(" running")
7100
7101                 if self.failed:
7102                         f.add_literal_data(", ")
7103                         f.push_style(number_style)
7104                         f.add_literal_data(failed_str)
7105                         f.pop_style()
7106                         f.add_literal_data(" failed")
7107
7108                 padding = self._jobs_column_width - len(plain_output.getvalue())
7109                 if padding > 0:
7110                         f.add_literal_data(padding * " ")
7111
7112                 f.add_literal_data("Load avg: ")
7113                 f.add_literal_data(load_avg_str)
7114
7115                 # Truncate to fit width, to avoid making the terminal scroll if the
7116                 # line overflows (happens when the load average is large).
7117                 plain_output = plain_output.getvalue()
7118                 if self._isatty and len(plain_output) > self.width:
7119                         # Use plain_output here since it's easier to truncate
7120                         # properly than the color output which contains console
7121                         # color codes.
7122                         self._update(plain_output[:self.width])
7123                 else:
7124                         self._update(color_output.getvalue())
7125
7126                 if self.xterm_titles:
7127                         xtermTitle(" ".join(plain_output.split()))
7128
7129 class Scheduler(PollScheduler):
7130
7131         _opts_ignore_blockers = \
7132                 frozenset(["--buildpkgonly",
7133                 "--fetchonly", "--fetch-all-uri",
7134                 "--nodeps", "--pretend"])
7135
7136         _opts_no_background = \
7137                 frozenset(["--pretend",
7138                 "--fetchonly", "--fetch-all-uri"])
7139
7140         _opts_no_restart = frozenset(["--buildpkgonly",
7141                 "--fetchonly", "--fetch-all-uri", "--pretend"])
7142
7143         _bad_resume_opts = set(["--ask", "--changelog",
7144                 "--resume", "--skipfirst"])
7145
7146         _fetch_log = os.path.join(_emerge_log_dir, 'emerge-fetch.log')
7147
7148         class _iface_class(SlotObject):
7149                 __slots__ = ("dblinkEbuildPhase", "dblinkDisplayMerge",
7150                         "dblinkElog", "dblinkEmergeLog", "fetch", "register", "schedule",
7151                         "scheduleSetup", "scheduleUnpack", "scheduleYield",
7152                         "unregister")
7153
7154         class _fetch_iface_class(SlotObject):
7155                 __slots__ = ("log_file", "schedule")
7156
7157         _task_queues_class = slot_dict_class(
7158                 ("merge", "jobs", "fetch", "unpack"), prefix="")
7159
7160         class _build_opts_class(SlotObject):
7161                 __slots__ = ("buildpkg", "buildpkgonly",
7162                         "fetch_all_uri", "fetchonly", "pretend")
7163
7164         class _binpkg_opts_class(SlotObject):
7165                 __slots__ = ("fetchonly", "getbinpkg", "pretend")
7166
7167         class _pkg_count_class(SlotObject):
7168                 __slots__ = ("curval", "maxval")
7169
7170         class _emerge_log_class(SlotObject):
7171                 __slots__ = ("xterm_titles",)
7172
7173                 def log(self, *pargs, **kwargs):
7174                         if not self.xterm_titles:
7175                                 # Avoid interference with the scheduler's status display.
7176                                 kwargs.pop("short_msg", None)
7177                         emergelog(self.xterm_titles, *pargs, **kwargs)
7178
7179         class _failed_pkg(SlotObject):
7180                 __slots__ = ("build_dir", "build_log", "pkg", "returncode")
7181
7182         class _ConfigPool(object):
7183                 """Interface for a task to temporarily allocate a config
7184                 instance from a pool. This allows a task to be constructed
7185                 long before the config instance actually becomes needed, like
7186                 when prefetchers are constructed for the whole merge list."""
7187                 __slots__ = ("_root", "_allocate", "_deallocate")
7188                 def __init__(self, root, allocate, deallocate):
7189                         self._root = root
7190                         self._allocate = allocate
7191                         self._deallocate = deallocate
7192                 def allocate(self):
7193                         return self._allocate(self._root)
7194                 def deallocate(self, settings):
7195                         self._deallocate(settings)
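                # Usage sketch (illustrative): a build task calls
                # settings = pool.allocate() right before the config instance
                # is actually needed and pool.deallocate(settings) as soon as
                # it is done with it, returning the instance to the per-root
                # pool managed by the Scheduler.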
7196
7197         class _unknown_internal_error(portage.exception.PortageException):
7198                 """
7199                 Used internally to terminate scheduling. The specific reason for
7200                 the failure should have been dumped to stderr.
7201                 """
7202                 def __init__(self, value=""):
7203                         portage.exception.PortageException.__init__(self, value)
7204
7205         def __init__(self, settings, trees, mtimedb, myopts,
7206                 spinner, mergelist, favorites, digraph):
7207                 PollScheduler.__init__(self)
7208                 self.settings = settings
7209                 self.target_root = settings["ROOT"]
7210                 self.trees = trees
7211                 self.myopts = myopts
7212                 self._spinner = spinner
7213                 self._mtimedb = mtimedb
7214                 self._mergelist = mergelist
7215                 self._favorites = favorites
7216                 self._args_set = InternalPackageSet(favorites)
7217                 self._build_opts = self._build_opts_class()
7218                 for k in self._build_opts.__slots__:
7219                         setattr(self._build_opts, k, "--" + k.replace("_", "-") in myopts)
7220                 self._binpkg_opts = self._binpkg_opts_class()
7221                 for k in self._binpkg_opts.__slots__:
7222                         setattr(self._binpkg_opts, k, "--" + k.replace("_", "-") in myopts)
7223
7224                 self.curval = 0
7225                 self._logger = self._emerge_log_class()
7226                 self._task_queues = self._task_queues_class()
7227                 for k in self._task_queues.allowed_keys:
7228                         setattr(self._task_queues, k,
7229                                 SequentialTaskQueue())
7230
7231                 # Holds merges that wait to be executed until no builds are
7232                 # executing. This is useful for system packages since dependencies
7233                 # on system packages are frequently unspecified.
7234                 self._merge_wait_queue = []
7235                 # Holds merges that have been transferred from the merge_wait_queue to
7236                 # the actual merge queue. They are removed from this list upon
7237                 # completion. Other packages can start building only when this list is
7238                 # empty.
7239                 self._merge_wait_scheduled = []
7240
7241                 # Holds system packages and their deep runtime dependencies. Before
7242                 # being merged, these packages go to merge_wait_queue, to be merged
7243                 # when no other packages are building.
7244                 self._deep_system_deps = set()
7245
7246                 # Holds packages to merge which will satisfy currently unsatisfied
7247                 # deep runtime dependencies of system packages. If this is not empty
7248                 # then no parallel builds will be spawned until it is empty. This
7249                 # minimizes the possibility that a build will fail due to the system
7250                 # being in a fragile state. For example, see bug #259954.
7251                 self._unsatisfied_system_deps = set()
7252
7253                 self._status_display = JobStatusDisplay(
7254                         xterm_titles=('notitles' not in settings.features))
7255                 self._max_load = myopts.get("--load-average")
7256                 max_jobs = myopts.get("--jobs")
7257                 if max_jobs is None:
7258                         max_jobs = 1
7259                 self._set_max_jobs(max_jobs)
7260
7261                 # The root where the currently running
7262                 # portage instance is installed.
7263                 self._running_root = trees["/"]["root_config"]
7264                 self.edebug = 0
7265                 if settings.get("PORTAGE_DEBUG", "") == "1":
7266                         self.edebug = 1
7267                 self.pkgsettings = {}
7268                 self._config_pool = {}
7269                 self._blocker_db = {}
7270                 for root in trees:
7271                         self._config_pool[root] = []
7272                         self._blocker_db[root] = BlockerDB(trees[root]["root_config"])
7273
7274                 fetch_iface = self._fetch_iface_class(log_file=self._fetch_log,
7275                         schedule=self._schedule_fetch)
7276                 self._sched_iface = self._iface_class(
7277                         dblinkEbuildPhase=self._dblink_ebuild_phase,
7278                         dblinkDisplayMerge=self._dblink_display_merge,
7279                         dblinkElog=self._dblink_elog,
7280                         dblinkEmergeLog=self._dblink_emerge_log,
7281                         fetch=fetch_iface, register=self._register,
7282                         schedule=self._schedule_wait,
7283                         scheduleSetup=self._schedule_setup,
7284                         scheduleUnpack=self._schedule_unpack,
7285                         scheduleYield=self._schedule_yield,
7286                         unregister=self._unregister)
7287
7288                 self._prefetchers = weakref.WeakValueDictionary()
7289                 self._pkg_queue = []
7290                 self._completed_tasks = set()
7291
7292                 self._failed_pkgs = []
7293                 self._failed_pkgs_all = []
7294                 self._failed_pkgs_die_msgs = []
7295                 self._post_mod_echo_msgs = []
7296                 self._parallel_fetch = False
7297                 merge_count = len([x for x in mergelist \
7298                         if isinstance(x, Package) and x.operation == "merge"])
7299                 self._pkg_count = self._pkg_count_class(
7300                         curval=0, maxval=merge_count)
7301                 self._status_display.maxval = self._pkg_count.maxval
7302
7303                 # The load average takes some time to respond when new
7304                 # jobs are added, so we need to limit the rate of adding
7305                 # new jobs.
7306                 self._job_delay_max = 10
7307                 self._job_delay_factor = 1.0
7308                 self._job_delay_exp = 1.5
7309                 self._previous_job_start_time = None
7310
7311                 self._set_digraph(digraph)
7312
7313                 # This is used to memoize the _choose_pkg() result when
7314                 # no packages can be chosen until one of the existing
7315                 # jobs completes.
7316                 self._choose_pkg_return_early = False
7317
7318                 features = self.settings.features
7319                 if "parallel-fetch" in features and \
7320                         not ("--pretend" in self.myopts or \
7321                         "--fetch-all-uri" in self.myopts or \
7322                         "--fetchonly" in self.myopts):
7323                         if "distlocks" not in features:
7324                                 portage.writemsg(red("!!!")+"\n", noiselevel=-1)
7325                                 portage.writemsg(red("!!!")+" parallel-fetching " + \
7326                                         "requires the distlocks feature enabled"+"\n",
7327                                         noiselevel=-1)
7328                                 portage.writemsg(red("!!!")+" you have it disabled, " + \
7329                                         "thus parallel-fetching is being disabled"+"\n",
7330                                         noiselevel=-1)
7331                                 portage.writemsg(red("!!!")+"\n", noiselevel=-1)
7332                         elif len(mergelist) > 1:
7333                                 self._parallel_fetch = True
7334
7335                 if self._parallel_fetch:
7336                         # clear out existing fetch log if it exists
7337                         try:
7338                                 open(self._fetch_log, 'w')
7339                         except EnvironmentError:
7340                                 pass
7341
7342                 self._running_portage = None
7343                 portage_match = self._running_root.trees["vartree"].dbapi.match(
7344                         portage.const.PORTAGE_PACKAGE_ATOM)
7345                 if portage_match:
7346                         cpv = portage_match.pop()
7347                         self._running_portage = self._pkg(cpv, "installed",
7348                                 self._running_root, installed=True)
7349
7350         def _poll(self, timeout=None):
7351                 self._schedule()
7352                 PollScheduler._poll(self, timeout=timeout)
7353
7354         def _set_max_jobs(self, max_jobs):
7355                 self._max_jobs = max_jobs
7356                 self._task_queues.jobs.max_jobs = max_jobs
7357
7358         def _background_mode(self):
7359                 """
7360                 Check if background mode is enabled and adjust states as necessary.
7361
7362                 @rtype: bool
7363                 @returns: True if background mode is enabled, False otherwise.
7364                 """
7365                 background = (self._max_jobs is True or \
7366                         self._max_jobs > 1 or "--quiet" in self.myopts) and \
7367                         not bool(self._opts_no_background.intersection(self.myopts))
7368
7369                 if background:
7370                         interactive_tasks = self._get_interactive_tasks()
7371                         if interactive_tasks:
7372                                 background = False
7373                                 writemsg_level(">>> Sending package output to stdio due " + \
7374                                         "to interactive package(s):\n",
7375                                         level=logging.INFO, noiselevel=-1)
7376                                 msg = [""]
7377                                 for pkg in interactive_tasks:
7378                                         pkg_str = "  " + colorize("INFORM", str(pkg.cpv))
7379                                         if pkg.root != "/":
7380                                                 pkg_str += " for " + pkg.root
7381                                         msg.append(pkg_str)
7382                                 msg.append("")
7383                                 writemsg_level("".join("%s\n" % (l,) for l in msg),
7384                                         level=logging.INFO, noiselevel=-1)
7385                                 if self._max_jobs is True or self._max_jobs > 1:
7386                                         self._set_max_jobs(1)
7387                                         writemsg_level(">>> Setting --jobs=1 due " + \
7388                                                 "to the above interactive package(s)\n",
7389                                                 level=logging.INFO, noiselevel=-1)
7390
7391                 self._status_display.quiet = \
7392                         not background or \
7393                         ("--quiet" in self.myopts and \
7394                         "--verbose" not in self.myopts)
7395
7396                 self._logger.xterm_titles = \
7397                         "notitles" not in self.settings.features and \
7398                         self._status_display.quiet
7399
7400                 return background
7401
7402         def _get_interactive_tasks(self):
7403                 from portage import flatten
7404                 from portage.dep import use_reduce, paren_reduce
7405                 interactive_tasks = []
7406                 for task in self._mergelist:
7407                         if not (isinstance(task, Package) and \
7408                                 task.operation == "merge"):
7409                                 continue
7410                         try:
7411                                 properties = flatten(use_reduce(paren_reduce(
7412                                         task.metadata["PROPERTIES"]), uselist=task.use.enabled))
7413                         except portage.exception.InvalidDependString, e:
7414                                 show_invalid_depstring_notice(task,
7415                                         task.metadata["PROPERTIES"], str(e))
7416                                 raise self._unknown_internal_error()
7417                         if "interactive" in properties:
7418                                 interactive_tasks.append(task)
7419                 return interactive_tasks
7420
7421         def _set_digraph(self, digraph):
7422                 if "--nodeps" in self.myopts or \
7423                         (self._max_jobs is not True and self._max_jobs < 2):
7424                         # save some memory
7425                         self._digraph = None
7426                         return
7427
7428                 self._digraph = digraph
7429                 self._find_system_deps()
7430                 self._prune_digraph()
7431                 self._prevent_builddir_collisions()
7432
7433         def _find_system_deps(self):
7434                 """
7435                 Find system packages and their deep runtime dependencies. Before being
7436                 merged, these packages go to merge_wait_queue, to be merged when no
7437                 other packages are building.
7438                 """
7439                 deep_system_deps = self._deep_system_deps
7440                 deep_system_deps.clear()
7441                 deep_system_deps.update(
7442                         _find_deep_system_runtime_deps(self._digraph))
7443                 deep_system_deps.difference_update([pkg for pkg in \
7444                         deep_system_deps if pkg.operation != "merge"])
7445
7446         def _prune_digraph(self):
7447                 """
7448                 Prune any root nodes that are irrelevant.
7449                 """
7450
7451                 graph = self._digraph
7452                 completed_tasks = self._completed_tasks
7453                 removed_nodes = set()
7454                 while True:
7455                         for node in graph.root_nodes():
7456                                 if not isinstance(node, Package) or \
7457                                         (node.installed and node.operation == "nomerge") or \
7458                                         node.onlydeps or \
7459                                         node in completed_tasks:
7460                                         removed_nodes.add(node)
7461                         if removed_nodes:
7462                                 graph.difference_update(removed_nodes)
7463                         if not removed_nodes:
7464                                 break
7465                         removed_nodes.clear()
7466
7467         def _prevent_builddir_collisions(self):
7468                 """
7469                 When building stages, sometimes the same exact cpv needs to be merged
7470                 to both $ROOTs. Add edges to the digraph in order to avoid collisions
7471                 in the builddir. Currently, normal file locks would be inappropriate
7472                 for this purpose since emerge holds all of its build dir locks from
7473                 the main process.
7474                 """
7475                 cpv_map = {}
7476                 for pkg in self._mergelist:
7477                         if not isinstance(pkg, Package):
7478                                 # a satisfied blocker
7479                                 continue
7480                         if pkg.installed:
7481                                 continue
7482                         if pkg.cpv not in cpv_map:
7483                                 cpv_map[pkg.cpv] = [pkg]
7484                                 continue
7485                         for earlier_pkg in cpv_map[pkg.cpv]:
7486                                 self._digraph.add(earlier_pkg, pkg,
7487                                         priority=DepPriority(buildtime=True))
7488                         cpv_map[pkg.cpv].append(pkg)
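                # For example (illustrative): if the same cpv is scheduled
                # for merge to two different $ROOTs, the edge added above
                # forces the later instance to wait for the earlier one, so
                # the two builds never occupy the same builddir concurrently.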
7489
7490         class _pkg_failure(portage.exception.PortageException):
7491                 """
7492                 An instance of this class is raised by unmerge() when
7493                 an uninstallation fails.
7494                 """
7495                 status = 1
7496                 def __init__(self, *pargs):
7497                         portage.exception.PortageException.__init__(self, pargs)
7498                         if pargs:
7499                                 self.status = pargs[0]
7500
7501         def _schedule_fetch(self, fetcher):
7502                 """
7503                 Schedule a fetcher on the fetch queue, in order to
7504                 serialize access to the fetch log.
7505                 """
7506                 self._task_queues.fetch.addFront(fetcher)
7507
7508         def _schedule_setup(self, setup_phase):
7509                 """
7510                 Schedule a setup phase on the merge queue, in order to
7511                 serialize unsandboxed access to the live filesystem.
7512                 """
7513                 self._task_queues.merge.addFront(setup_phase)
7514                 self._schedule()
7515
7516         def _schedule_unpack(self, unpack_phase):
7517                 """
7518                 Schedule an unpack phase on the unpack queue, in order
7519                 to serialize $DISTDIR access for live ebuilds.
7520                 """
7521                 self._task_queues.unpack.add(unpack_phase)
7522
7523         def _find_blockers(self, new_pkg):
7524                 """
7525                 Returns a callable which should be called only when
7526                 the vdb lock has been acquired.
7527                 """
7528                 def get_blockers():
7529                         return self._find_blockers_with_lock(new_pkg, acquire_lock=0)
7530                 return get_blockers
7531
7532         def _find_blockers_with_lock(self, new_pkg, acquire_lock=0):
7533                 if self._opts_ignore_blockers.intersection(self.myopts):
7534                         return None
7535
7536                 # Call gc.collect() here to avoid heap overflow that
7537                 # triggers 'Cannot allocate memory' errors (reported
7538                 # with python-2.5).
7539                 import gc
7540                 gc.collect()
7541
7542                 blocker_db = self._blocker_db[new_pkg.root]
7543
7544                 blocker_dblinks = []
7545                 for blocking_pkg in blocker_db.findInstalledBlockers(
7546                         new_pkg, acquire_lock=acquire_lock):
7547                         if new_pkg.slot_atom == blocking_pkg.slot_atom:
7548                                 continue
7549                         if new_pkg.cpv == blocking_pkg.cpv:
7550                                 continue
7551                         blocker_dblinks.append(portage.dblink(
7552                                 blocking_pkg.category, blocking_pkg.pf, blocking_pkg.root,
7553                                 self.pkgsettings[blocking_pkg.root], treetype="vartree",
7554                                 vartree=self.trees[blocking_pkg.root]["vartree"]))
7555
7556                 gc.collect()
7557
7558                 return blocker_dblinks
7559
7560         def _dblink_pkg(self, pkg_dblink):
7561                 cpv = pkg_dblink.mycpv
7562                 type_name = RootConfig.tree_pkg_map[pkg_dblink.treetype]
7563                 root_config = self.trees[pkg_dblink.myroot]["root_config"]
7564                 installed = type_name == "installed"
7565                 return self._pkg(cpv, type_name, root_config, installed=installed)
7566
7567         def _append_to_log_path(self, log_path, msg):
7568                 f = open(log_path, 'a')
7569                 try:
7570                         f.write(msg)
7571                 finally:
7572                         f.close()
7573
7574         def _dblink_elog(self, pkg_dblink, phase, func, msgs):
7575
7576                 log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
7577                 log_file = None
7578                 out = sys.stdout
7579                 background = self._background
7580
7581                 if background and log_path is not None:
7582                         log_file = open(log_path, 'a')
7583                         out = log_file
7584
7585                 try:
7586                         for msg in msgs:
7587                                 func(msg, phase=phase, key=pkg_dblink.mycpv, out=out)
7588                 finally:
7589                         if log_file is not None:
7590                                 log_file.close()
7591
7592         def _dblink_emerge_log(self, msg):
7593                 self._logger.log(msg)
7594
7595         def _dblink_display_merge(self, pkg_dblink, msg, level=0, noiselevel=0):
7596                 log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
7597                 background = self._background
7598
7599                 if log_path is None:
7600                         if not (background and level < logging.WARN):
7601                                 portage.util.writemsg_level(msg,
7602                                         level=level, noiselevel=noiselevel)
7603                 else:
7604                         if not background:
7605                                 portage.util.writemsg_level(msg,
7606                                         level=level, noiselevel=noiselevel)
7607                         self._append_to_log_path(log_path, msg)
7608
7609         def _dblink_ebuild_phase(self,
7610                 pkg_dblink, pkg_dbapi, ebuild_path, phase):
7611                 """
7612                 Using this callback for merge phases allows the scheduler
7613                 to run while these phases execute asynchronously, and allows
7614                 the scheduler to control output handling.
7615                 """
7616
7617                 scheduler = self._sched_iface
7618                 settings = pkg_dblink.settings
7619                 pkg = self._dblink_pkg(pkg_dblink)
7620                 background = self._background
7621                 log_path = settings.get("PORTAGE_LOG_FILE")
7622
7623                 ebuild_phase = EbuildPhase(background=background,
7624                         pkg=pkg, phase=phase, scheduler=scheduler,
7625                         settings=settings, tree=pkg_dblink.treetype)
7626                 ebuild_phase.start()
7627                 ebuild_phase.wait()
7628
7629                 return ebuild_phase.returncode
7630
7631         def _generate_digests(self):
7632                 """
7633                 Generate digests if necessary for --digest or FEATURES=digest.
7634                 In order to avoid interference, this must be done before parallel
7635                 tasks are started.
7636                 """
7637
7638                 if '--fetchonly' in self.myopts:
7639                         return os.EX_OK
7640
7641                 digest = '--digest' in self.myopts
7642                 if not digest:
7643                         for pkgsettings in self.pkgsettings.itervalues():
7644                                 if 'digest' in pkgsettings.features:
7645                                         digest = True
7646                                         break
7647
7648                 if not digest:
7649                         return os.EX_OK
7650
7651                 for x in self._mergelist:
7652                         if not isinstance(x, Package) or \
7653                                 x.type_name != 'ebuild' or \
7654                                 x.operation != 'merge':
7655                                 continue
7656                         pkgsettings = self.pkgsettings[x.root]
7657                         if '--digest' not in self.myopts and \
7658                                 'digest' not in pkgsettings.features:
7659                                 continue
7660                         portdb = x.root_config.trees['porttree'].dbapi
7661                         ebuild_path = portdb.findname(x.cpv)
7662                         if not ebuild_path:
7663                                 writemsg_level(
7664                                         "!!! Could not locate ebuild for '%s'.\n" \
7665                                         % x.cpv, level=logging.ERROR, noiselevel=-1)
7666                                 return 1
7667                         pkgsettings['O'] = os.path.dirname(ebuild_path)
7668                         if not portage.digestgen([], pkgsettings, myportdb=portdb):
7669                                 writemsg_level(
7670                                         "!!! Unable to generate manifest for '%s'.\n" \
7671                                         % x.cpv, level=logging.ERROR, noiselevel=-1)
7672                                 return 1
7673
7674                 return os.EX_OK
7675
7676         def _check_manifests(self):
7677                 # Verify all the manifests now so that the user is notified of failure
7678                 # as soon as possible.
7679                 if "strict" not in self.settings.features or \
7680                         "--fetchonly" in self.myopts or \
7681                         "--fetch-all-uri" in self.myopts:
7682                         return os.EX_OK
7683
7684                 shown_verifying_msg = False
7685                 quiet_settings = {}
7686                 for myroot, pkgsettings in self.pkgsettings.iteritems():
7687                         quiet_config = portage.config(clone=pkgsettings)
7688                         quiet_config["PORTAGE_QUIET"] = "1"
7689                         quiet_config.backup_changes("PORTAGE_QUIET")
7690                         quiet_settings[myroot] = quiet_config
7691                         del quiet_config
7692
7693                 for x in self._mergelist:
7694                         if not isinstance(x, Package) or \
7695                                 x.type_name != "ebuild":
7696                                 continue
7697
7698                         if not shown_verifying_msg:
7699                                 shown_verifying_msg = True
7700                                 self._status_msg("Verifying ebuild manifests")
7701
7702                         root_config = x.root_config
7703                         portdb = root_config.trees["porttree"].dbapi
7704                         quiet_config = quiet_settings[root_config.root]
7705                         quiet_config["O"] = os.path.dirname(portdb.findname(x.cpv))
7706                         if not portage.digestcheck([], quiet_config, strict=True):
7707                                 return 1
7708
7709                 return os.EX_OK
7710
7711         def _add_prefetchers(self):
7712
7713                 if not self._parallel_fetch:
7714                         return
7715
7717                 self._status_msg("Starting parallel fetch")
7718
7719                 prefetchers = self._prefetchers
7720                 getbinpkg = "--getbinpkg" in self.myopts
7721
7722                 # In order to avoid "waiting for lock" messages
7723                 # at the beginning, which annoy users, never
7724                 # spawn a prefetcher for the first package.
7725                 for pkg in self._mergelist[1:]:
7726                         prefetcher = self._create_prefetcher(pkg)
7727                         if prefetcher is not None:
7728                                 self._task_queues.fetch.add(prefetcher)
7729                                 prefetchers[pkg] = prefetcher
7730
7731         def _create_prefetcher(self, pkg):
7732                 """
7733                 @return: a prefetcher, or None if not applicable
7734                 """
7735                 prefetcher = None
7736
7737                 if not isinstance(pkg, Package):
7738                         pass
7739
7740                 elif pkg.type_name == "ebuild":
7741
7742                         prefetcher = EbuildFetcher(background=True,
7743                                 config_pool=self._ConfigPool(pkg.root,
7744                                 self._allocate_config, self._deallocate_config),
7745                                 fetchonly=1, logfile=self._fetch_log,
7746                                 pkg=pkg, prefetch=True, scheduler=self._sched_iface)
7747
7748                 elif pkg.type_name == "binary" and \
7749                         "--getbinpkg" in self.myopts and \
7750                         pkg.root_config.trees["bintree"].isremote(pkg.cpv):
7751
7752                         prefetcher = BinpkgPrefetcher(background=True,
7753                                 pkg=pkg, scheduler=self._sched_iface)
7754
7755                 return prefetcher
7756
7757         def _is_restart_scheduled(self):
7758                 """
7759                 Check if the merge list contains a replacement
7760                 for the currently running instance, which will result
7761                 in a restart after the merge.
7762                 @rtype: bool
7763                 @returns: True if a restart is scheduled, False otherwise.
7764                 """
7765                 if self._opts_no_restart.intersection(self.myopts):
7766                         return False
7767
7768                 mergelist = self._mergelist
7769
7770                 for i, pkg in enumerate(mergelist):
7771                         if self._is_restart_necessary(pkg) and \
7772                                 i != len(mergelist) - 1:
7773                                 return True
7774
7775                 return False
7776
7777         def _is_restart_necessary(self, pkg):
7778                 """
7779                 @return: True if merging the given package
7780                         requires a restart, False otherwise.
7781                 """
7782
7783                 # Figure out if we need a restart.
7784                 if pkg.root == self._running_root.root and \
7785                         portage.match_from_list(
7786                         portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
7787                         if self._running_portage:
7788                                 return pkg.cpv != self._running_portage.cpv
7789                         return True
7790                 return False
7791
7792         def _restart_if_necessary(self, pkg):
7793                 """
7794                 Use execv() to restart emerge. This happens
7795                 if portage upgrades itself and there are
7796                 remaining packages in the list.
7797                 """
7798
7799                 if self._opts_no_restart.intersection(self.myopts):
7800                         return
7801
7802                 if not self._is_restart_necessary(pkg):
7803                         return
7804
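                     # If portage is the last package in the merge list then
                     # nothing remains to be merged afterwards, so no restart
                     # is needed.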
7805                 if pkg == self._mergelist[-1]:
7806                         return
7807
7808                 self._main_loop_cleanup()
7809
7810                 logger = self._logger
7811                 pkg_count = self._pkg_count
7812                 mtimedb = self._mtimedb
7813                 bad_resume_opts = self._bad_resume_opts
7814
7815                 logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
7816                         (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
7817
7818                 logger.log(" *** RESTARTING " + \
7819                         "emerge via exec() after change of " + \
7820                         "portage version.")
7821
7822                 mtimedb["resume"]["mergelist"].remove(list(pkg))
7823                 mtimedb.commit()
7824                 portage.run_exitfuncs()
7825                 mynewargv = [sys.argv[0], "--resume"]
7826                 resume_opts = self.myopts.copy()
7827                 # For automatic resume, we need to prevent
7828                 # any of bad_resume_opts from leaking in
7829                 # via EMERGE_DEFAULT_OPTS.
7830                 resume_opts["--ignore-default-opts"] = True
7831                 for myopt, myarg in resume_opts.iteritems():
7832                         if myopt not in bad_resume_opts:
7833                                 if myarg is True:
7834                                         mynewargv.append(myopt)
7835                                 else:
7836                                         mynewargv.append(myopt +"="+ str(myarg))
7837                 # priority only needs to be adjusted on the first run
7838                 os.environ["PORTAGE_NICENESS"] = "0"
7839                 os.execv(mynewargv[0], mynewargv)
7840
7841         def merge(self):
7842
7843                 if "--resume" in self.myopts:
7844                         # We're resuming.
7845                         portage.writemsg_stdout(
7846                                 colorize("GOOD", "*** Resuming merge...\n"), noiselevel=-1)
7847                         self._logger.log(" *** Resuming merge...")
7848
7849                 self._save_resume_list()
7850
7851                 try:
7852                         self._background = self._background_mode()
7853                 except self._unknown_internal_error:
7854                         return 1
7855
7856                 for root in self.trees:
7857                         root_config = self.trees[root]["root_config"]
7858
7859                         # Even for --pretend --fetch mode, PORTAGE_TMPDIR is required
7860                         # since it might spawn pkg_nofetch which requires PORTAGE_BUILDDIR
7861                         # for ensuring sane $PWD (bug #239560) and storing elog messages.
7862                         tmpdir = root_config.settings.get("PORTAGE_TMPDIR", "")
7863                         if not tmpdir or not os.path.isdir(tmpdir):
7864                                 msg = "The directory specified in your " + \
7865                                         "PORTAGE_TMPDIR variable, '%s', " % tmpdir + \
7866                                         "does not exist. Please create this " + \
7867                                         "directory or correct your PORTAGE_TMPDIR setting."
7868                                 msg = textwrap.wrap(msg, 70)
7869                                 out = portage.output.EOutput()
7870                                 for l in msg:
7871                                         out.eerror(l)
7872                                 return 1
7873
7874                         if self._background:
7875                                 root_config.settings.unlock()
7876                                 root_config.settings["PORTAGE_BACKGROUND"] = "1"
7877                                 root_config.settings.backup_changes("PORTAGE_BACKGROUND")
7878                                 root_config.settings.lock()
7879
7880                         self.pkgsettings[root] = portage.config(
7881                                 clone=root_config.settings)
7882
7883                 rval = self._generate_digests()
7884                 if rval != os.EX_OK:
7885                         return rval
7886
7887                 rval = self._check_manifests()
7888                 if rval != os.EX_OK:
7889                         return rval
7890
7891                 keep_going = "--keep-going" in self.myopts
7892                 fetchonly = self._build_opts.fetchonly
7893                 mtimedb = self._mtimedb
7894                 failed_pkgs = self._failed_pkgs
7895
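                     # With --keep-going, failed packages are removed from the
                     # resume list and the dependency graph is recalculated
                     # before the remaining merges are retried.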
7896                 while True:
7897                         rval = self._merge()
7898                         if rval == os.EX_OK or fetchonly or not keep_going:
7899                                 break
7900                         if "resume" not in mtimedb:
7901                                 break
7902                         mergelist = self._mtimedb["resume"].get("mergelist")
7903                         if not mergelist:
7904                                 break
7905
7906                         if not failed_pkgs:
7907                                 break
7908
7909                         for failed_pkg in failed_pkgs:
7910                                 mergelist.remove(list(failed_pkg.pkg))
7911
7912                         self._failed_pkgs_all.extend(failed_pkgs)
7913                         del failed_pkgs[:]
7914
7915                         if not mergelist:
7916                                 break
7917
7918                         if not self._calc_resume_list():
7919                                 break
7920
7921                         clear_caches(self.trees)
7922                         if not self._mergelist:
7923                                 break
7924
7925                         self._save_resume_list()
7926                         self._pkg_count.curval = 0
7927                         self._pkg_count.maxval = len([x for x in self._mergelist \
7928                                 if isinstance(x, Package) and x.operation == "merge"])
7929                         self._status_display.maxval = self._pkg_count.maxval
7930
7931                 self._logger.log(" *** Finished. Cleaning up...")
7932
7933                 if failed_pkgs:
7934                         self._failed_pkgs_all.extend(failed_pkgs)
7935                         del failed_pkgs[:]
7936
7937                 background = self._background
7938                 failure_log_shown = False
7939                 if background and len(self._failed_pkgs_all) == 1:
7940                         # If only one package failed then just show its
7941                         # whole log for easy viewing.
7942                         failed_pkg = self._failed_pkgs_all[-1]
7943                         build_dir = failed_pkg.build_dir
7944                         log_file = None
7945
7946                         log_paths = [failed_pkg.build_log]
7947
7948                         log_path = self._locate_failure_log(failed_pkg)
7949                         if log_path is not None:
7950                                 try:
7951                                         log_file = open(log_path)
7952                                 except IOError:
7953                                         pass
7954
7955                         if log_file is not None:
7956                                 try:
7957                                         for line in log_file:
7958                                                 writemsg_level(line, noiselevel=-1)
7959                                 finally:
7960                                         log_file.close()
7961                                 failure_log_shown = True
7962
7963                 # Dump mod_echo output now since it tends to flood the terminal.
7964                 # This prevents more important output, generated later, from
7965                 # being swept away by the mod_echo output.
7966                 mod_echo_output = _flush_elog_mod_echo()
7967
7968                 if background and not failure_log_shown and \
7969                         self._failed_pkgs_all and \
7970                         self._failed_pkgs_die_msgs and \
7971                         not mod_echo_output:
7972
7973                         printer = portage.output.EOutput()
7974                         for mysettings, key, logentries in self._failed_pkgs_die_msgs:
7975                                 root_msg = ""
7976                                 if mysettings["ROOT"] != "/":
7977                                         root_msg = " merged to %s" % mysettings["ROOT"]
7978                                 print
7979                                 printer.einfo("Error messages for package %s%s:" % \
7980                                         (colorize("INFORM", key), root_msg))
7981                                 print
7982                                 for phase in portage.const.EBUILD_PHASES:
7983                                         if phase not in logentries:
7984                                                 continue
7985                                         for msgtype, msgcontent in logentries[phase]:
7986                                                 if isinstance(msgcontent, basestring):
7987                                                         msgcontent = [msgcontent]
7988                                                 for line in msgcontent:
7989                                                         printer.eerror(line.strip("\n"))
7990
7991                 if self._post_mod_echo_msgs:
7992                         for msg in self._post_mod_echo_msgs:
7993                                 msg()
7994
7995                 if len(self._failed_pkgs_all) > 1 or \
7996                         (self._failed_pkgs_all and "--keep-going" in self.myopts):
7997                         if len(self._failed_pkgs_all) > 1:
7998                                 msg = "The following %d packages have " % \
7999                                         len(self._failed_pkgs_all) + \
8000                                         "failed to build or install:"
8001                         else:
8002                                 msg = "The following package has " + \
8003                                         "failed to build or install:"
8004                         prefix = bad(" * ")
8005                         writemsg(prefix + "\n", noiselevel=-1)
8006                         from textwrap import wrap
8007                         for line in wrap(msg, 72):
8008                                 writemsg("%s%s\n" % (prefix, line), noiselevel=-1)
8009                         writemsg(prefix + "\n", noiselevel=-1)
8010                         for failed_pkg in self._failed_pkgs_all:
8011                                 writemsg("%s\t%s\n" % (prefix,
8012                                         colorize("INFORM", str(failed_pkg.pkg))),
8013                                         noiselevel=-1)
8014                         writemsg(prefix + "\n", noiselevel=-1)
8015
8016                 return rval
8017
8018         def _elog_listener(self, mysettings, key, logentries, fulltext):
8019                 errors = portage.elog.filter_loglevels(logentries, ["ERROR"])
8020                 if errors:
8021                         self._failed_pkgs_die_msgs.append(
8022                                 (mysettings, key, errors))
8023
8024         def _locate_failure_log(self, failed_pkg):
8025
8026                 build_dir = failed_pkg.build_dir
8027                 log_file = None
8028
8029                 log_paths = [failed_pkg.build_log]
8030
8031                 for log_path in log_paths:
8032                         if not log_path:
8033                                 continue
8034
8035                         try:
8036                                 log_size = os.stat(log_path).st_size
8037                         except OSError:
8038                                 continue
8039
8040                         if log_size == 0:
8041                                 continue
8042
8043                         return log_path
8044
8045                 return None
8046
8047         def _add_packages(self):
8048                 pkg_queue = self._pkg_queue
8049                 for pkg in self._mergelist:
8050                         if isinstance(pkg, Package):
8051                                 pkg_queue.append(pkg)
8052                         elif isinstance(pkg, Blocker):
8053                                 pass
8054
8055         def _system_merge_started(self, merge):
8056                 """
8057                 Add any unsatisfied runtime deps to self._unsatisfied_system_deps.
8058                 """
8059                 graph = self._digraph
8060                 if graph is None:
8061                         return
8062                 pkg = merge.merge.pkg
8063
8064                 # Skip this if $ROOT != / since it shouldn't matter if there
8065                 # are unsatisfied system runtime deps in this case.
8066                 if pkg.root != '/':
8067                         return
8068
8069                 completed_tasks = self._completed_tasks
8070                 unsatisfied = self._unsatisfied_system_deps
8071
8072                 def ignore_non_runtime_or_satisfied(priority):
8073                         """
8074                         Ignore non-runtime and satisfied runtime priorities.
8075                         """
8076                         if isinstance(priority, DepPriority) and \
8077                                 not priority.satisfied and \
8078                                 (priority.runtime or priority.runtime_post):
8079                                 return False
8080                         return True
8081
8082                 # When checking for unsatisfied runtime deps, only check
8083                 # direct deps since indirect deps are checked when the
8084                 # corresponding parent is merged.
8085                 for child in graph.child_nodes(pkg,
8086                         ignore_priority=ignore_non_runtime_or_satisfied):
8087                         if not isinstance(child, Package) or \
8088                                 child.operation == 'uninstall':
8089                                 continue
8090                         if child is pkg:
8091                                 continue
8092                         if child.operation == 'merge' and \
8093                                 child not in completed_tasks:
8094                                 unsatisfied.add(child)
8095
8096         def _merge_wait_exit_handler(self, task):
8097                 self._merge_wait_scheduled.remove(task)
8098                 self._merge_exit(task)
8099
8100         def _merge_exit(self, merge):
8101                 self._do_merge_exit(merge)
8102                 self._deallocate_config(merge.merge.settings)
8103                 if merge.returncode == os.EX_OK and \
8104                         not merge.merge.pkg.installed:
8105                         self._status_display.curval += 1
8106                 self._status_display.merges = len(self._task_queues.merge)
8107                 self._schedule()
8108
8109         def _do_merge_exit(self, merge):
8110                 pkg = merge.merge.pkg
8111                 if merge.returncode != os.EX_OK:
8112                         settings = merge.merge.settings
8113                         build_dir = settings.get("PORTAGE_BUILDDIR")
8114                         build_log = settings.get("PORTAGE_LOG_FILE")
8115
8116                         self._failed_pkgs.append(self._failed_pkg(
8117                                 build_dir=build_dir, build_log=build_log,
8118                                 pkg=pkg,
8119                                 returncode=merge.returncode))
8120                         self._failed_pkg_msg(self._failed_pkgs[-1], "install", "to")
8121
8122                         self._status_display.failed = len(self._failed_pkgs)
8123                         return
8124
8125                 self._task_complete(pkg)
8126                 pkg_to_replace = merge.merge.pkg_to_replace
8127                 if pkg_to_replace is not None:
8128                         # When a package is replaced, mark its uninstall
8129                         # task complete (if any).
8130                         uninst_hash_key = \
8131                                 ("installed", pkg.root, pkg_to_replace.cpv, "uninstall")
8132                         self._task_complete(uninst_hash_key)
8133
8134                 if pkg.installed:
8135                         return
8136
8137                 self._restart_if_necessary(pkg)
8138
8139                 # Call mtimedb.commit() after each merge so that
8140                 # --resume still works after being interrupted
8141                 # by reboot, sigkill or similar.
8142                 mtimedb = self._mtimedb
8143                 mtimedb["resume"]["mergelist"].remove(list(pkg))
8144                 if not mtimedb["resume"]["mergelist"]:
8145                         del mtimedb["resume"]
8146                 mtimedb.commit()
8147
8148         def _build_exit(self, build):
8149                 if build.returncode == os.EX_OK:
8150                         self.curval += 1
8151                         merge = PackageMerge(merge=build)
8152                         if not build.build_opts.buildpkgonly and \
8153                                 build.pkg in self._deep_system_deps:
8154                                 # Since dependencies on system packages are frequently
8155                                 # unspecified, merge them only when no builds are executing.
8156                                 self._merge_wait_queue.append(merge)
8157                                 merge.addStartListener(self._system_merge_started)
8158                         else:
8159                                 merge.addExitListener(self._merge_exit)
8160                                 self._task_queues.merge.add(merge)
8161                                 self._status_display.merges = len(self._task_queues.merge)
8162                 else:
8163                         settings = build.settings
8164                         build_dir = settings.get("PORTAGE_BUILDDIR")
8165                         build_log = settings.get("PORTAGE_LOG_FILE")
8166
8167                         self._failed_pkgs.append(self._failed_pkg(
8168                                 build_dir=build_dir, build_log=build_log,
8169                                 pkg=build.pkg,
8170                                 returncode=build.returncode))
8171                         self._failed_pkg_msg(self._failed_pkgs[-1], "emerge", "for")
8172
8173                         self._status_display.failed = len(self._failed_pkgs)
8174                         self._deallocate_config(build.settings)
8175                 self._jobs -= 1
8176                 self._status_display.running = self._jobs
8177                 self._schedule()
8178
8179         def _extract_exit(self, build):
8180                 self._build_exit(build)
8181
8182         def _task_complete(self, pkg):
8183                 self._completed_tasks.add(pkg)
8184                 self._unsatisfied_system_deps.discard(pkg)
8185                 self._choose_pkg_return_early = False
8186
8187         def _merge(self):
8188
8189                 self._add_prefetchers()
8190                 self._add_packages()
8191                 pkg_queue = self._pkg_queue
8192                 failed_pkgs = self._failed_pkgs
8193                 portage.locks._quiet = self._background
8194                 portage.elog._emerge_elog_listener = self._elog_listener
8195                 rval = os.EX_OK
8196
8197                 try:
8198                         self._main_loop()
8199                 finally:
8200                         self._main_loop_cleanup()
8201                         portage.locks._quiet = False
8202                         portage.elog._emerge_elog_listener = None
8203                         if failed_pkgs:
8204                                 rval = failed_pkgs[-1].returncode
8205
8206                 return rval
8207
8208         def _main_loop_cleanup(self):
8209                 del self._pkg_queue[:]
8210                 self._completed_tasks.clear()
8211                 self._deep_system_deps.clear()
8212                 self._unsatisfied_system_deps.clear()
8213                 self._choose_pkg_return_early = False
8214                 self._status_display.reset()
8215                 self._digraph = None
8216                 self._task_queues.fetch.clear()
8217
8218         def _choose_pkg(self):
8219                 """
8220                 Choose a task that has all of its dependencies satisfied.
8221                 """
8222
8223                 if self._choose_pkg_return_early:
8224                         return None
8225
8226                 if self._digraph is None:
8227                         if (self._jobs or self._task_queues.merge) and \
8228                                 not ("--nodeps" in self.myopts and \
8229                                 (self._max_jobs is True or self._max_jobs > 1)):
8230                                 self._choose_pkg_return_early = True
8231                                 return None
8232                         return self._pkg_queue.pop(0)
8233
8234                 if not (self._jobs or self._task_queues.merge):
8235                         return self._pkg_queue.pop(0)
8236
8237                 self._prune_digraph()
8238
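                     # Scan the queue in order and choose the first package that
                     # does not depend on any merge that is still scheduled.
                     # Packages later in the queue are ignored as dependencies
                     # here, since delaying this package would not improve the
                     # merge order anyway.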
8239                 chosen_pkg = None
8240                 later = set(self._pkg_queue)
8241                 for pkg in self._pkg_queue:
8242                         later.remove(pkg)
8243                         if not self._dependent_on_scheduled_merges(pkg, later):
8244                                 chosen_pkg = pkg
8245                                 break
8246
8247                 if chosen_pkg is not None:
8248                         self._pkg_queue.remove(chosen_pkg)
8249
8250                 if chosen_pkg is None:
8251                         # There's no point in searching for a package to
8252                         # choose until at least one of the existing jobs
8253                         # completes.
8254                         self._choose_pkg_return_early = True
8255
8256                 return chosen_pkg
8257
8258         def _dependent_on_scheduled_merges(self, pkg, later):
8259                 """
8260                 Traverse the subgraph of the given package's deep dependencies
8261                 to see if it contains any scheduled merges.
8262                 @param pkg: a package to check dependencies for
8263                 @type pkg: Package
8264                 @param later: packages for which dependence should be ignored
8265                         since they will be merged later than pkg anyway and therefore
8266                         delaying the merge of pkg will not result in a more optimal
8267                         merge order
8268                 @type later: set
8269                 @rtype: bool
8270                 @returns: True if the package is dependent, False otherwise.
8271                 """
8272
8273                 graph = self._digraph
8274                 completed_tasks = self._completed_tasks
8275
8276                 dependent = False
8277                 traversed_nodes = set([pkg])
8278                 direct_deps = graph.child_nodes(pkg)
8279                 node_stack = direct_deps
8280                 direct_deps = frozenset(direct_deps)
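                     # Depth-first traversal of deep dependencies.  A node makes
                     # pkg "dependent" unless it is already installed with no
                     # merge pending, it is an uninstall that is not a direct
                     # dependency, it has already completed, or it will be merged
                     # later anyway.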
8281                 while node_stack:
8282                         node = node_stack.pop()
8283                         if node in traversed_nodes:
8284                                 continue
8285                         traversed_nodes.add(node)
8286                         if not ((node.installed and node.operation == "nomerge") or \
8287                                 (node.operation == "uninstall" and \
8288                                 node not in direct_deps) or \
8289                                 node in completed_tasks or \
8290                                 node in later):
8291                                 dependent = True
8292                                 break
8293                         node_stack.extend(graph.child_nodes(node))
8294
8295                 return dependent
8296
8297         def _allocate_config(self, root):
8298                 """
8299                 Allocate a unique config instance for a task in order
8300                 to prevent interference between parallel tasks.
8301                 """
8302                 if self._config_pool[root]:
8303                         temp_settings = self._config_pool[root].pop()
8304                 else:
8305                         temp_settings = portage.config(clone=self.pkgsettings[root])
8306                 # Since config.setcpv() isn't guaranteed to call config.reset() for
8307                 # performance reasons, call it here to make sure all settings from the
8308                 # previous package get flushed out (such as PORTAGE_LOG_FILE).
8309                 temp_settings.reload()
8310                 temp_settings.reset()
8311                 return temp_settings
8312
8313         def _deallocate_config(self, settings):
8314                 self._config_pool[settings["ROOT"]].append(settings)
8315
8316         def _main_loop(self):
8317
8318                 # Only allow 1 job max if a restart is scheduled
8319                 # due to portage update.
8320                 if self._is_restart_scheduled() or \
8321                         self._opts_no_background.intersection(self.myopts):
8322                         self._set_max_jobs(1)
8323
8324                 merge_queue = self._task_queues.merge
8325
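                     # Keep scheduling while there is work left to schedule, then
                     # keep polling until all remaining jobs and merges have
                     # finished.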
8326                 while self._schedule():
8327                         if self._poll_event_handlers:
8328                                 self._poll_loop()
8329
8330                 while True:
8331                         self._schedule()
8332                         if not (self._jobs or merge_queue):
8333                                 break
8334                         if self._poll_event_handlers:
8335                                 self._poll_loop()
8336
8337         def _keep_scheduling(self):
8338                 return bool(self._pkg_queue and \
8339                         not (self._failed_pkgs and not self._build_opts.fetchonly))
8340
8341         def _schedule_tasks(self):
8342
8343                 # When the number of jobs drops to zero, process all waiting merges.
8344                 if not self._jobs and self._merge_wait_queue:
8345                         for task in self._merge_wait_queue:
8346                                 task.addExitListener(self._merge_wait_exit_handler)
8347                                 self._task_queues.merge.add(task)
8348                         self._status_display.merges = len(self._task_queues.merge)
8349                         self._merge_wait_scheduled.extend(self._merge_wait_queue)
8350                         del self._merge_wait_queue[:]
8351
8352                 self._schedule_tasks_imp()
8353                 self._status_display.display()
8354
8355                 state_change = 0
8356                 for q in self._task_queues.values():
8357                         if q.schedule():
8358                                 state_change += 1
8359
8360                 # Cancel prefetchers if they're the only reason
8361                 # the main poll loop is still running.
8362                 if self._failed_pkgs and not self._build_opts.fetchonly and \
8363                         not (self._jobs or self._task_queues.merge) and \
8364                         self._task_queues.fetch:
8365                         self._task_queues.fetch.clear()
8366                         state_change += 1
8367
8368                 if state_change:
8369                         self._schedule_tasks_imp()
8370                         self._status_display.display()
8371
8372                 return self._keep_scheduling()
8373
8374         def _job_delay(self):
8375                 """
8376                 @rtype: bool
8377                 @returns: True if job scheduling should be delayed, False otherwise.
8378                 """
8379
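                     # The delay only applies while jobs are running and a load
                     # limit (self._max_load) is set.  It grows polynomially with
                     # the number of running jobs and is capped at
                     # self._job_delay_max:
                     #
                     #   delay = min(self._job_delay_factor * self._jobs ** self._job_delay_exp,
                     #           self._job_delay_max)
                     #
                     # A new job is delayed if fewer than that many seconds have
                     # passed since the previous job was started.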
8380                 if self._jobs and self._max_load is not None:
8381
8382                         current_time = time.time()
8383
8384                         delay = self._job_delay_factor * self._jobs ** self._job_delay_exp
8385                         if delay > self._job_delay_max:
8386                                 delay = self._job_delay_max
8387                         if (current_time - self._previous_job_start_time) < delay:
8388                                 return True
8389
8390                 return False
8391
8392         def _schedule_tasks_imp(self):
8393                 """
8394                 @rtype: bool
8395                 @returns: True if state changed, False otherwise.
8396                 """
8397
8398                 state_change = 0
8399
8400                 while True:
8401
8402                         if not self._keep_scheduling():
8403                                 return bool(state_change)
8404
8405                         if self._choose_pkg_return_early or \
8406                                 self._merge_wait_scheduled or \
8407                                 (self._jobs and self._unsatisfied_system_deps) or \
8408                                 not self._can_add_job() or \
8409                                 self._job_delay():
8410                                 return bool(state_change)
8411
8412                         pkg = self._choose_pkg()
8413                         if pkg is None:
8414                                 return bool(state_change)
8415
8416                         state_change += 1
8417
8418                         if not pkg.installed:
8419                                 self._pkg_count.curval += 1
8420
8421                         task = self._task(pkg)
8422
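                             # Already-installed packages need no build job and go
                             # straight to the merge queue.  Built packages (e.g.
                             # binary packages) are handled by _extract_exit, while
                             # everything else is built from source and handled by
                             # _build_exit.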
8423                         if pkg.installed:
8424                                 merge = PackageMerge(merge=task)
8425                                 merge.addExitListener(self._merge_exit)
8426                                 self._task_queues.merge.add(merge)
8427
8428                         elif pkg.built:
8429                                 self._jobs += 1
8430                                 self._previous_job_start_time = time.time()
8431                                 self._status_display.running = self._jobs
8432                                 task.addExitListener(self._extract_exit)
8433                                 self._task_queues.jobs.add(task)
8434
8435                         else:
8436                                 self._jobs += 1
8437                                 self._previous_job_start_time = time.time()
8438                                 self._status_display.running = self._jobs
8439                                 task.addExitListener(self._build_exit)
8440                                 self._task_queues.jobs.add(task)
8441
8442                 return bool(state_change)
8443
8444         def _task(self, pkg):
8445
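                     # Unless this is an uninstall, look up the currently
                     # installed package in the same slot (if any) so that the
                     # merge can treat it as the package being replaced.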
8446                 pkg_to_replace = None
8447                 if pkg.operation != "uninstall":
8448                         vardb = pkg.root_config.trees["vartree"].dbapi
8449                         previous_cpv = vardb.match(pkg.slot_atom)
8450                         if previous_cpv:
8451                                 previous_cpv = previous_cpv.pop()
8452                                 pkg_to_replace = self._pkg(previous_cpv,
8453                                         "installed", pkg.root_config, installed=True)
8454
8455                 task = MergeListItem(args_set=self._args_set,
8456                         background=self._background, binpkg_opts=self._binpkg_opts,
8457                         build_opts=self._build_opts,
8458                         config_pool=self._ConfigPool(pkg.root,
8459                         self._allocate_config, self._deallocate_config),
8460                         emerge_opts=self.myopts,
8461                         find_blockers=self._find_blockers(pkg), logger=self._logger,
8462                         mtimedb=self._mtimedb, pkg=pkg, pkg_count=self._pkg_count.copy(),
8463                         pkg_to_replace=pkg_to_replace,
8464                         prefetcher=self._prefetchers.get(pkg),
8465                         scheduler=self._sched_iface,
8466                         settings=self._allocate_config(pkg.root),
8467                         statusMessage=self._status_msg,
8468                         world_atom=self._world_atom)
8469
8470                 return task
8471
8472         def _failed_pkg_msg(self, failed_pkg, action, preposition):
8473                 pkg = failed_pkg.pkg
8474                 msg = "%s to %s %s" % \
8475                         (bad("Failed"), action, colorize("INFORM", pkg.cpv))
8476                 if pkg.root != "/":
8477                         msg += " %s %s" % (preposition, pkg.root)
8478
8479                 log_path = self._locate_failure_log(failed_pkg)
8480                 if log_path is not None:
8481                         msg += ", Log file:"
8482                 self._status_msg(msg)
8483
8484                 if log_path is not None:
8485                         self._status_msg(" '%s'" % (colorize("INFORM", log_path),))
8486
8487         def _status_msg(self, msg):
8488                 """
8489                 Display a brief status message (no newlines) in the status display.
8490                 This is called by tasks to provide feedback to the user. This
8491                 delegates the responsibility of generating \r and \n control characters,
8492                 to guarantee that lines are created or erased when necessary and
8493                 appropriate.
8494
8495                 @type msg: str
8496                 @param msg: a brief status message (no newlines allowed)
8497                 """
8498                 if not self._background:
8499                         writemsg_level("\n")
8500                 self._status_display.displayMessage(msg)
8501
8502         def _save_resume_list(self):
8503                 """
8504                 Do this before verifying the ebuild Manifests since it might
8505                 be possible for the user to use --resume --skipfirst get past
8506                 be possible for the user to use --resume --skipfirst to get past
8507                 """
8508                 mtimedb = self._mtimedb
8509                 mtimedb["resume"]["mergelist"] = [list(x) \
8510                         for x in self._mergelist \
8511                         if isinstance(x, Package) and x.operation == "merge"]
8512
8513                 mtimedb.commit()
8514
8515         def _calc_resume_list(self):
8516                 """
8517                 Use the current resume list to calculate a new one,
8518                 dropping any packages with unsatisfied deps.
8519                 @rtype: bool
8520                 @returns: True if successful, False otherwise.
8521                 """
8522                 print colorize("GOOD", "*** Resuming merge...")
8523
8524                 if self._show_list():
8525                         if "--tree" in self.myopts:
8526                                 portage.writemsg_stdout("\n" + \
8527                                         darkgreen("These are the packages that " + \
8528                                         "would be merged, in reverse order:\n\n"))
8529
8530                         else:
8531                                 portage.writemsg_stdout("\n" + \
8532                                         darkgreen("These are the packages that " + \
8533                                         "would be merged, in order:\n\n"))
8534
8535                 show_spinner = "--quiet" not in self.myopts and \
8536                         "--nodeps" not in self.myopts
8537
8538                 if show_spinner:
8539                         print "Calculating dependencies  ",
8540
8541                 myparams = create_depgraph_params(self.myopts, None)
8542                 success = False
8543                 e = None
8544                 try:
8545                         success, mydepgraph, dropped_tasks = resume_depgraph(
8546                                 self.settings, self.trees, self._mtimedb, self.myopts,
8547                                 myparams, self._spinner)
8548                 except depgraph.UnsatisfiedResumeDep, exc:
8549                         # rename variable to avoid python-3.0 error:
8550                         # SyntaxError: can not delete variable 'e' referenced in nested
8551                         #              scope
8552                         e = exc
8553                         mydepgraph = e.depgraph
8554                         dropped_tasks = set()
8555
8556                 if show_spinner:
8557                         print "\b\b... done!"
8558
8559                 if e is not None:
8560                         def unsatisfied_resume_dep_msg():
8561                                 mydepgraph.display_problems()
8562                                 out = portage.output.EOutput()
8563                                 out.eerror("One or more packages are either masked or " + \
8564                                         "have missing dependencies:")
8565                                 out.eerror("")
8566                                 indent = "  "
8567                                 show_parents = set()
8568                                 for dep in e.value:
8569                                         if dep.parent in show_parents:
8570                                                 continue
8571                                         show_parents.add(dep.parent)
8572                                         if dep.atom is None:
8573                                                 out.eerror(indent + "Masked package:")
8574                                                 out.eerror(2 * indent + str(dep.parent))
8575                                                 out.eerror("")
8576                                         else:
8577                                                 out.eerror(indent + str(dep.atom) + " pulled in by:")
8578                                                 out.eerror(2 * indent + str(dep.parent))
8579                                                 out.eerror("")
8580                                 msg = "The resume list contains packages " + \
8581                                         "that are either masked or have " + \
8582                                         "unsatisfied dependencies. " + \
8583                                         "Please restart/continue " + \
8584                                         "the operation manually, or use --skipfirst " + \
8585                                         "to skip the first package in the list and " + \
8586                                         "any other packages that may be " + \
8587                                         "masked or have missing dependencies."
8588                                 for line in textwrap.wrap(msg, 72):
8589                                         out.eerror(line)
8590                         self._post_mod_echo_msgs.append(unsatisfied_resume_dep_msg)
8591                         return False
8592
8593                 if success and self._show_list():
8594                         mylist = mydepgraph.altlist()
8595                         if mylist:
8596                                 if "--tree" in self.myopts:
8597                                         mylist.reverse()
8598                                 mydepgraph.display(mylist, favorites=self._favorites)
8599
8600                 if not success:
8601                         self._post_mod_echo_msgs.append(mydepgraph.display_problems)
8602                         return False
8603                 mydepgraph.display_problems()
8604
8605                 mylist = mydepgraph.altlist()
8606                 mydepgraph.break_refs(mylist)
8607                 mydepgraph.break_refs(dropped_tasks)
8608                 self._mergelist = mylist
8609                 self._set_digraph(mydepgraph.schedulerGraph())
8610
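                     # Report every package that was dropped from the resume list
                     # due to an unsatisfied dependency, and record it as a
                     # failed package.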
8611                 msg_width = 75
8612                 for task in dropped_tasks:
8613                         if not (isinstance(task, Package) and task.operation == "merge"):
8614                                 continue
8615                         pkg = task
8616                         msg = "emerge --keep-going:" + \
8617                                 " %s" % (pkg.cpv,)
8618                         if pkg.root != "/":
8619                                 msg += " for %s" % (pkg.root,)
8620                         msg += " dropped due to unsatisfied dependency."
8621                         for line in textwrap.wrap(msg, msg_width):
8622                                 eerror(line, phase="other", key=pkg.cpv)
8623                         settings = self.pkgsettings[pkg.root]
8624                         # Ensure that log collection from $T is disabled inside
8625                         # elog_process(), since any logs that might exist are
8626                         # not valid here.
8627                         settings.pop("T", None)
8628                         portage.elog.elog_process(pkg.cpv, settings)
8629                         self._failed_pkgs_all.append(self._failed_pkg(pkg=pkg))
8630
8631                 return True
8632
8633         def _show_list(self):
8634                 myopts = self.myopts
8635                 if "--quiet" not in myopts and \
8636                         ("--ask" in myopts or "--tree" in myopts or \
8637                         "--verbose" in myopts):
8638                         return True
8639                 return False
8640
8641         def _world_atom(self, pkg):
8642                 """
8643                 Add the package to the world file, but only if
8644                 it's supposed to be added. Otherwise, do nothing.
8645                 """
8646
8647                 if set(("--buildpkgonly", "--fetchonly",
8648                         "--fetch-all-uri",
8649                         "--oneshot", "--onlydeps",
8650                         "--pretend")).intersection(self.myopts):
8651                         return
8652
8653                 if pkg.root != self.target_root:
8654                         return
8655
8656                 args_set = self._args_set
8657                 if not args_set.findAtomForPackage(pkg):
8658                         return
8659
8660                 logger = self._logger
8661                 pkg_count = self._pkg_count
8662                 root_config = pkg.root_config
8663                 world_set = root_config.sets["world"]
8664                 world_locked = False
8665                 if hasattr(world_set, "lock"):
8666                         world_set.lock()
8667                         world_locked = True
8668
8669                 try:
8670                         if hasattr(world_set, "load"):
8671                                 world_set.load() # maybe it's changed on disk
8672
8673                         atom = create_world_atom(pkg, args_set, root_config)
8674                         if atom:
8675                                 if hasattr(world_set, "add"):
8676                                         self._status_msg(('Recording %s in "world" ' + \
8677                                                 'favorites file...') % atom)
8678                                         logger.log(" === (%s of %s) Updating world file (%s)" % \
8679                                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv))
8680                                         world_set.add(atom)
8681                                 else:
8682                                         writemsg_level('\n!!! Unable to record %s in "world"\n' % \
8683                                                 (atom,), level=logging.WARN, noiselevel=-1)
8684                 finally:
8685                         if world_locked:
8686                                 world_set.unlock()
8687
8688         def _pkg(self, cpv, type_name, root_config, installed=False):
8689                 """
8690                 Get a package instance from the cache, or create a new
8691                 one if necessary. Raises KeyError from aux_get if it
8692                 fails for some reason (package does not exist or is
8693                 corrupt).
8694                 """
8695                 operation = "merge"
8696                 if installed:
8697                         operation = "nomerge"
8698
8699                 if self._digraph is not None:
8700                         # Reuse existing instance when available.
8701                         pkg = self._digraph.get(
8702                                 (type_name, root_config.root, cpv, operation))
8703                         if pkg is not None:
8704                                 return pkg
8705
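                     # Not available from the graph (or no graph exists), so
                     # construct a new Package instance from the appropriate
                     # dbapi's cached metadata.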
8706                 tree_type = depgraph.pkg_tree_map[type_name]
8707                 db = root_config.trees[tree_type].dbapi
8708                 db_keys = list(self.trees[root_config.root][
8709                         tree_type].dbapi._aux_cache_keys)
8710                 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
8711                 pkg = Package(cpv=cpv, metadata=metadata,
8712                         root_config=root_config, installed=installed)
8713                 if type_name == "ebuild":
8714                         settings = self.pkgsettings[root_config.root]
8715                         settings.setcpv(pkg)
8716                         pkg.metadata["USE"] = settings["PORTAGE_USE"]
8717                         pkg.metadata['CHOST'] = settings.get('CHOST', '')
8718
8719                 return pkg
8720
8721 class MetadataRegen(PollScheduler):
8722
8723         def __init__(self, portdb, cp_iter=None, consumer=None,
8724                 max_jobs=None, max_load=None):
8725                 PollScheduler.__init__(self)
8726                 self._portdb = portdb
8727                 self._global_cleanse = False
8728                 if cp_iter is None:
8729                         cp_iter = self._iter_every_cp()
8730                         # We can globally cleanse stale cache only if we
8731                         # iterate over every single cp.
8732                         self._global_cleanse = True
8733                 self._cp_iter = cp_iter
8734                 self._consumer = consumer
8735
8736                 if max_jobs is None:
8737                         max_jobs = 1
8738
8739                 self._max_jobs = max_jobs
8740                 self._max_load = max_load
8741                 self._sched_iface = self._sched_iface_class(
8742                         register=self._register,
8743                         schedule=self._schedule_wait,
8744                         unregister=self._unregister)
8745
8746                 self._valid_pkgs = set()
8747                 self._cp_set = set()
8748                 self._process_iter = self._iter_metadata_processes()
8749                 self.returncode = os.EX_OK
8750                 self._error_count = 0
8751
8752         def _iter_every_cp(self):
8753                 every_cp = self._portdb.cp_all()
8754                 every_cp.sort(reverse=True)
8755                 try:
8756                         while True:
8757                                 yield every_cp.pop()
8758                 except IndexError:
8759                         pass
8760
8761         def _iter_metadata_processes(self):
8762                 portdb = self._portdb
8763                 valid_pkgs = self._valid_pkgs
8764                 cp_set = self._cp_set
8765                 consumer = self._consumer
8766
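                     # Reuse valid cache entries directly, passing them to the
                     # consumer, and yield an EbuildMetadataPhase task only for
                     # cache misses.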
8767                 for cp in self._cp_iter:
8768                         cp_set.add(cp)
8769                         portage.writemsg_stdout("Processing %s\n" % cp)
8770                         cpv_list = portdb.cp_list(cp)
8771                         for cpv in cpv_list:
8772                                 valid_pkgs.add(cpv)
8773                                 ebuild_path, repo_path = portdb.findname2(cpv)
8774                                 metadata, st, emtime = portdb._pull_valid_cache(
8775                                         cpv, ebuild_path, repo_path)
8776                                 if metadata is not None:
8777                                         if consumer is not None:
8778                                                 consumer(cpv, ebuild_path,
8779                                                         repo_path, metadata)
8780                                         continue
8781
8782                                 yield EbuildMetadataPhase(cpv=cpv, ebuild_path=ebuild_path,
8783                                         ebuild_mtime=emtime,
8784                                         metadata_callback=portdb._metadata_callback,
8785                                         portdb=portdb, repo_path=repo_path,
8786                                         settings=portdb.doebuild_settings)
8787
8788         def run(self):
8789
8790                 portdb = self._portdb
8791                 from portage.cache.cache_errors import CacheError
8792                 dead_nodes = {}
8793
8794                 while self._schedule():
8795                         self._poll_loop()
8796
8797                 while self._jobs:
8798                         self._poll_loop()
8799
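                     # Collect the relevant auxdb cache keys per tree into dead_nodes; keys that
                     # still correspond to existing ebuilds are discarded below, and whatever
                     # remains is stale and gets deleted.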
8800                 if self._global_cleanse:
8801                         for mytree in portdb.porttrees:
8802                                 try:
8803                                         dead_nodes[mytree] = set(portdb.auxdb[mytree].iterkeys())
8804                                 except CacheError, e:
8805                                         portage.writemsg("Error listing cache entries for " + \
8806                                                 "'%s': %s, continuing...\n" % (mytree, e),
8807                                                 noiselevel=-1)
8808                                         del e
8809                                         dead_nodes = None
8810                                         break
8811                 else:
8812                         cp_set = self._cp_set
8813                         cpv_getkey = portage.cpv_getkey
8814                         for mytree in portdb.porttrees:
8815                                 try:
8816                                         dead_nodes[mytree] = set(cpv for cpv in \
8817                                                 portdb.auxdb[mytree].iterkeys() \
8818                                                 if cpv_getkey(cpv) in cp_set)
8819                                 except CacheError, e:
8820                                         portage.writemsg("Error listing cache entries for " + \
8821                                                 "'%s': %s, continuing...\n" % (mytree, e),
8822                                                 noiselevel=-1)
8823                                         del e
8824                                         dead_nodes = None
8825                                         break
8826
8827                 if dead_nodes:
8828                         for y in self._valid_pkgs:
8829                                 for mytree in portdb.porttrees:
8830                                         if portdb.findname2(y, mytree=mytree)[0]:
8831                                                 dead_nodes[mytree].discard(y)
8832
8833                         for mytree, nodes in dead_nodes.iteritems():
8834                                 auxdb = portdb.auxdb[mytree]
8835                                 for y in nodes:
8836                                         try:
8837                                                 del auxdb[y]
8838                                         except (KeyError, CacheError):
8839                                                 pass
8840
8841         def _schedule_tasks(self):
8842                 """
8843                 @rtype: bool
8844                 @returns: True if there may be remaining tasks to schedule,
8845                         False otherwise.
8846                 """
8847                 while self._can_add_job():
8848                         try:
8849                                 metadata_process = self._process_iter.next()
8850                         except StopIteration:
8851                                 return False
8852
8853                         self._jobs += 1
8854                         metadata_process.scheduler = self._sched_iface
8855                         metadata_process.addExitListener(self._metadata_exit)
8856                         metadata_process.start()
8857                 return True
8858
8859         def _metadata_exit(self, metadata_process):
8860                 self._jobs -= 1
8861                 if metadata_process.returncode != os.EX_OK:
8862                         self.returncode = 1
8863                         self._error_count += 1
8864                         self._valid_pkgs.discard(metadata_process.cpv)
8865                         portage.writemsg("Error processing %s, continuing...\n" % \
8866                                 (metadata_process.cpv,), noiselevel=-1)
8867
8868                 if self._consumer is not None:
8869                         # On failure, still notify the consumer (in this case the metadata
8870                         # argument is None).
8871                         self._consumer(metadata_process.cpv,
8872                                 metadata_process.ebuild_path,
8873                                 metadata_process.repo_path,
8874                                 metadata_process.metadata)
8875
8876                 self._schedule()
8877
8878 class UninstallFailure(portage.exception.PortageException):
8879         """
8880         An instance of this class is raised by unmerge() when
8881         an uninstallation fails.
8882         """
8883         status = 1
8884         def __init__(self, *pargs):
8885                 portage.exception.PortageException.__init__(self, pargs)
8886                 if pargs:
8887                         self.status = pargs[0]
8888
8889 def unmerge(root_config, myopts, unmerge_action,
8890         unmerge_files, ldpath_mtimes, autoclean=0,
8891         clean_world=1, clean_delay=1, ordered=0, raise_on_error=0,
8892         scheduler=None, writemsg_level=portage.util.writemsg_level):
8893
8894         if clean_world:
8895                 clean_world = myopts.get('--deselect') != 'n'
8896         quiet = "--quiet" in myopts
8897         settings = root_config.settings
8898         sets = root_config.sets
8899         vartree = root_config.trees["vartree"]
8900         candidate_catpkgs=[]
8901         global_unmerge=0
8902         xterm_titles = "notitles" not in settings.features
8903         out = portage.output.EOutput()
8904         pkg_cache = {}
8905         db_keys = list(vartree.dbapi._aux_cache_keys)
8906
8907         def _pkg(cpv):
8908                 pkg = pkg_cache.get(cpv)
8909                 if pkg is None:
8910                         pkg = Package(cpv=cpv, installed=True,
8911                                 metadata=izip(db_keys, vartree.dbapi.aux_get(cpv, db_keys)),
8912                                 root_config=root_config,
8913                                 type_name="installed")
8914                         pkg_cache[cpv] = pkg
8915                 return pkg
8916
8917         vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
8918         try:
8919                 # At least the parent needs to exist for the lock file.
8920                 portage.util.ensure_dirs(vdb_path)
8921         except portage.exception.PortageException:
8922                 pass
8923         vdb_lock = None
8924         try:
8925                 if os.access(vdb_path, os.W_OK):
8926                         vdb_lock = portage.locks.lockdir(vdb_path)
8927                 realsyslist = sets["system"].getAtoms()
8928                 syslist = []
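                     # Expand the system set into syslist: non-virtual entries are added by cp,
                     # while a virtual is replaced by its installed provider only when exactly
                     # one provider is installed.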
8929                 for x in realsyslist:
8930                         mycp = portage.dep_getkey(x)
8931                         if mycp in settings.getvirtuals():
8932                                 providers = []
8933                                 for provider in settings.getvirtuals()[mycp]:
8934                                         if vartree.dbapi.match(provider):
8935                                                 providers.append(provider)
8936                                 if len(providers) == 1:
8937                                         syslist.extend(providers)
8938                         else:
8939                                 syslist.append(mycp)
8940         
8941                 mysettings = portage.config(clone=settings)
8942         
8943                 if not unmerge_files:
8944                         if unmerge_action == "unmerge":
8945                                 print
8946                                 print bold("emerge unmerge") + " can only be used with specific package names"
8947                                 print
8948                                 return 0
8949                         else:
8950                                 global_unmerge = 1
8951         
8952                 localtree = vartree
8953                 # process all arguments and add all
8954                 # valid db entries to candidate_catpkgs
8955                 if global_unmerge:
8956                         if not unmerge_files:
8957                                 candidate_catpkgs.extend(vartree.dbapi.cp_all())
8958                 else:
8959                         #we've got command-line arguments
8960                         if not unmerge_files:
8961                                 print "\nNo packages to unmerge have been provided.\n"
8962                                 return 0
8963                         for x in unmerge_files:
8964                                 arg_parts = x.split('/')
8965                                 if x[0] not in [".","/"] and \
8966                                         arg_parts[-1][-7:] != ".ebuild":
8967                                         #possible cat/pkg or dep; treat as such
8968                                         candidate_catpkgs.append(x)
8969                                 elif unmerge_action in ["prune","clean"]:
8970                                         print "\n!!! Prune and clean do not accept individual" + \
8971                                                 " ebuilds as arguments;\n    skipping.\n"
8972                                         continue
8973                                 else:
8974                                         # it appears that the user is specifying an installed
8975                                         # ebuild and we're in "unmerge" mode, so it's ok.
8976                                         if not os.path.exists(x):
8977                                                 print "\n!!! The path '"+x+"' doesn't exist.\n"
8978                                                 return 0
8979         
8980                                         absx   = os.path.abspath(x)
8981                                         sp_absx = absx.split("/")
8982                                         if sp_absx[-1][-7:] == ".ebuild":
8983                                                 del sp_absx[-1]
8984                                                 absx = "/".join(sp_absx)
8985         
8986                                         sp_absx_len = len(sp_absx)
8987         
8988                                         vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
8989                                         vdb_len  = len(vdb_path)
8990         
8991                                         sp_vdb     = vdb_path.split("/")
8992                                         sp_vdb_len = len(sp_vdb)
8993         
8994                                         if not os.path.exists(absx+"/CONTENTS"):
8995                                                 print "!!! Not a valid db dir: "+str(absx)
8996                                                 return 0
8997         
8998                                         if sp_absx_len <= sp_vdb_len:
8999                                                 # The path is shorter, so it can't be inside the vdb.
9000                                                 print sp_absx
9001                                                 print absx
9002                                                 print "\n!!!",x,"cannot be inside "+ \
9003                                                         vdb_path+"; aborting.\n"
9004                                                 return 0
9005         
9006                                         for idx in range(0,sp_vdb_len):
9007                                                 if idx >= sp_absx_len or sp_vdb[idx] != sp_absx[idx]:
9008                                                         print sp_absx
9009                                                         print absx
9010                                                         print "\n!!!", x, "is not inside "+\
9011                                                                 vdb_path+"; aborting.\n"
9012                                                         return 0
9013         
9014                                         print "="+"/".join(sp_absx[sp_vdb_len:])
9015                                         candidate_catpkgs.append(
9016                                                 "="+"/".join(sp_absx[sp_vdb_len:]))
9017         
9018                 newline=""
9019                 if (not "--quiet" in myopts):
9020                         newline="\n"
9021                 if settings["ROOT"] != "/":
9022                         writemsg_level(darkgreen(newline+ \
9023                                 ">>> Using system located in ROOT tree %s\n" % \
9024                                 settings["ROOT"]))
9025
9026                 if (("--pretend" in myopts) or ("--ask" in myopts)) and \
9027                         not ("--quiet" in myopts):
9028                         writemsg_level(darkgreen(newline+\
9029                                 ">>> These are the packages that would be unmerged:\n"))
9030
9031                 # Preservation of order is required for --depclean and --prune so
9032                 # that dependencies are respected. Use all_selected to eliminate
9033                 # duplicate packages since the same package may be selected by
9034                 # multiple atoms.
9035                 pkgmap = []
9036                 all_selected = set()
9037                 for x in candidate_catpkgs:
9038                         # cycle through all our candidate deps and determine
9039                         # what will and will not get unmerged
9040                         try:
9041                                 mymatch = vartree.dbapi.match(x)
9042                         except portage.exception.AmbiguousPackageName, errpkgs:
9043                                 print "\n\n!!! The short ebuild name \"" + \
9044                                         x + "\" is ambiguous.  Please specify"
9045                                 print "!!! one of the following fully-qualified " + \
9046                                         "ebuild names instead:\n"
9047                                 for i in errpkgs[0]:
9048                                         print "    " + green(i)
9049                                 print
9050                                 sys.exit(1)
9051         
9052                         if not mymatch and x[0] not in "<>=~":
9053                                 mymatch = localtree.dep_match(x)
9054                         if not mymatch:
9055                                 portage.writemsg("\n--- Couldn't find '%s' to %s.\n" % \
9056                                         (x, unmerge_action), noiselevel=-1)
9057                                 continue
9058
9059                         pkgmap.append(
9060                                 {"protected": set(), "selected": set(), "omitted": set()})
9061                         mykey = len(pkgmap) - 1
9062                         if unmerge_action == "unmerge":
9063                                 for y in mymatch:
9064                                         if y not in all_selected:
9065                                                 pkgmap[mykey]["selected"].add(y)
9066                                                 all_selected.add(y)
9067                         elif unmerge_action == "prune":
9068                                 if len(mymatch) == 1:
9069                                         continue
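                                     # Protect the single best installed version: within a slot the
                                     # higher counter (more recently installed) wins, across slots the
                                     # higher version wins. All other matched versions are selected
                                     # for pruning.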
9070                                 best_version = mymatch[0]
9071                                 best_slot = vartree.getslot(best_version)
9072                                 best_counter = vartree.dbapi.cpv_counter(best_version)
9073                                 for mypkg in mymatch[1:]:
9074                                         myslot = vartree.getslot(mypkg)
9075                                         mycounter = vartree.dbapi.cpv_counter(mypkg)
9076                                         if (myslot == best_slot and mycounter > best_counter) or \
9077                                                 mypkg == portage.best([mypkg, best_version]):
9078                                                 if myslot == best_slot:
9079                                                         if mycounter < best_counter:
9080                                                                 # On slot collision, keep the one with the
9081                                                                 # highest counter since it is the most
9082                                                                 # recently installed.
9083                                                                 continue
9084                                                 best_version = mypkg
9085                                                 best_slot = myslot
9086                                                 best_counter = mycounter
9087                                 pkgmap[mykey]["protected"].add(best_version)
9088                                 pkgmap[mykey]["selected"].update(mypkg for mypkg in mymatch \
9089                                         if mypkg != best_version and mypkg not in all_selected)
9090                                 all_selected.update(pkgmap[mykey]["selected"])
9091                         else:
9092                                 # unmerge_action == "clean"
9093                                 slotmap={}
9094                                 for mypkg in mymatch:
9095                                         if unmerge_action == "clean":
9096                                                 myslot = localtree.getslot(mypkg)
9097                                         else:
9098                                                 # since we're pruning, we don't care about slots
9099                                                 # and put all the pkgs in together
9100                                                 myslot = 0
9101                                         if myslot not in slotmap:
9102                                                 slotmap[myslot] = {}
9103                                         slotmap[myslot][localtree.dbapi.cpv_counter(mypkg)] = mypkg
9104
9105                                 for mypkg in vartree.dbapi.cp_list(
9106                                         portage.dep_getkey(mymatch[0])):
9107                                         myslot = vartree.getslot(mypkg)
9108                                         if myslot not in slotmap:
9109                                                 slotmap[myslot] = {}
9110                                         slotmap[myslot][vartree.dbapi.cpv_counter(mypkg)] = mypkg
9111
9112                                 for myslot in slotmap:
9113                                         counterkeys = slotmap[myslot].keys()
9114                                         if not counterkeys:
9115                                                 continue
9116                                         counterkeys.sort()
9117                                         pkgmap[mykey]["protected"].add(
9118                                                 slotmap[myslot][counterkeys[-1]])
9119                                         del counterkeys[-1]
9120
9121                                         for counter in counterkeys[:]:
9122                                                 mypkg = slotmap[myslot][counter]
9123                                                 if mypkg not in mymatch:
9124                                                         counterkeys.remove(counter)
9125                                                         pkgmap[mykey]["protected"].add(
9126                                                                 slotmap[myslot][counter])
9127
9128                                         #be pretty and get them in order of merge:
9129                                         for ckey in counterkeys:
9130                                                 mypkg = slotmap[myslot][ckey]
9131                                                 if mypkg not in all_selected:
9132                                                         pkgmap[mykey]["selected"].add(mypkg)
9133                                                         all_selected.add(mypkg)
9134                                         # ok, now the last-merged package
9135                                         # is protected, and the rest are selected
9136                 numselected = len(all_selected)
9137                 if global_unmerge and not numselected:
9138                         portage.writemsg_stdout("\n>>> No outdated packages were found on your system.\n")
9139                         return 0
9140         
9141                 if not numselected:
9142                         portage.writemsg_stdout(
9143                                 "\n>>> No packages selected for removal by " + \
9144                                 unmerge_action + "\n")
9145                         return 0
9146         finally:
9147                 if vdb_lock:
9148                         vartree.dbapi.flush_cache()
9149                         portage.locks.unlockdir(vdb_lock)
9150         
9151         from portage.sets.base import EditablePackageSet
9152         
9153         # generate a list of package sets that are directly or indirectly listed in "world",
9154         # as there is no persistent list of "installed" sets
9155         installed_sets = ["world"]
9156         stop = False
9157         pos = 0
9158         while not stop:
9159                 stop = True
9160                 pos = len(installed_sets)
9161                 for s in installed_sets[pos - 1:]:
9162                         if s not in sets:
9163                                 continue
9164                         candidates = [x[len(SETPREFIX):] for x in sets[s].getNonAtoms() if x.startswith(SETPREFIX)]
9165                         if candidates:
9166                                 stop = False
9167                                 installed_sets += candidates
9168         installed_sets = [x for x in installed_sets if x not in root_config.setconfig.active]
9169         del stop, pos
9170
9171         # we don't want to unmerge packages that are still listed in user-editable package sets
9172         # listed in "world" as they would be remerged on the next update of "world" or the 
9173         # relevant package sets.
9174         unknown_sets = set()
9175         for cp in xrange(len(pkgmap)):
9176                 for cpv in pkgmap[cp]["selected"].copy():
9177                         try:
9178                                 pkg = _pkg(cpv)
9179                         except KeyError:
9180                                 # It could have been uninstalled
9181                                 # by a concurrent process.
9182                                 continue
9183
9184                         if unmerge_action != "clean" and \
9185                                 root_config.root == "/" and \
9186                                 portage.match_from_list(
9187                                 portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
9188                                 msg = ("Not unmerging package %s since there is no valid " + \
9189                                 "reason for portage to unmerge itself.") % (pkg.cpv,)
9190                                 for line in textwrap.wrap(msg, 75):
9191                                         out.eerror(line)
9192                                 # adjust pkgmap so the display output is correct
9193                                 pkgmap[cp]["selected"].remove(cpv)
9194                                 all_selected.remove(cpv)
9195                                 pkgmap[cp]["protected"].add(cpv)
9196                                 continue
9197
9198                         parents = []
9199                         for s in installed_sets:
9200                                 # skip sets that the user requested to unmerge, and skip world 
9201                                 # unless we're unmerging a package set (as the package would be 
9202                                 # removed from "world" later on)
9203                                 if s in root_config.setconfig.active or (s == "world" and not root_config.setconfig.active):
9204                                         continue
9205
9206                                 if s not in sets:
9207                                         if s in unknown_sets:
9208                                                 continue
9209                                         unknown_sets.add(s)
9210                                         out = portage.output.EOutput()
9211                                         out.eerror(("Unknown set '@%s' in " + \
9212                                                 "%svar/lib/portage/world_sets") % \
9213                                                 (s, root_config.root))
9214                                         continue
9215
9216                                 # only check instances of EditablePackageSet as other classes are generally used for
9217                                 # special purposes and can be ignored here (and are usually generated dynamically, so the
9218                                 # user can't do much about them anyway)
9219                                 if isinstance(sets[s], EditablePackageSet):
9220
9221                                         # This is derived from a snippet of code in the
9222                                         # depgraph._iter_atoms_for_pkg() method.
9223                                         for atom in sets[s].iterAtomsForPackage(pkg):
9224                                                 inst_matches = vartree.dbapi.match(atom)
9225                                                 inst_matches.reverse() # descending order
9226                                                 higher_slot = None
9227                                                 for inst_cpv in inst_matches:
9228                                                         try:
9229                                                                 inst_pkg = _pkg(inst_cpv)
9230                                                         except KeyError:
9231                                                                 # It could have been uninstalled
9232                                                                 # by a concurrent process.
9233                                                                 continue
9234
9235                                                         if inst_pkg.cp != atom.cp:
9236                                                                 continue
9237                                                         if pkg >= inst_pkg:
9238                                                                 # This is descending order, and we're not
9239                                                                 # interested in any versions <= pkg given.
9240                                                                 break
9241                                                         if pkg.slot_atom != inst_pkg.slot_atom:
9242                                                                 higher_slot = inst_pkg
9243                                                                 break
9244                                                 if higher_slot is None:
9245                                                         parents.append(s)
9246                                                         break
9247                         if parents:
9248                                 #print colorize("WARN", "Package %s is going to be unmerged," % cpv)
9249                                 #print colorize("WARN", "but still listed in the following package sets:")
9250                                 #print "    %s\n" % ", ".join(parents)
9251                                 print colorize("WARN", "Not unmerging package %s as it is" % cpv)
9252                                 print colorize("WARN", "still referenced by the following package sets:")
9253                                 print "    %s\n" % ", ".join(parents)
9254                                 # adjust pkgmap so the display output is correct
9255                                 pkgmap[cp]["selected"].remove(cpv)
9256                                 all_selected.remove(cpv)
9257                                 pkgmap[cp]["protected"].add(cpv)
9258         
9259         del installed_sets
9260
9261         numselected = len(all_selected)
9262         if not numselected:
9263                 writemsg_level(
9264                         "\n>>> No packages selected for removal by " + \
9265                         unmerge_action + "\n")
9266                 return 0
9267
9268         # Unmerge order only matters in some cases
9269         if not ordered:
9270                 unordered = {}
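                     # Group the selected packages by cp so that the preview output below
                     # comes out sorted by category/package when unmerge order does not matter.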
9271                 for d in pkgmap:
9272                         selected = d["selected"]
9273                         if not selected:
9274                                 continue
9275                         cp = portage.cpv_getkey(iter(selected).next())
9276                         cp_dict = unordered.get(cp)
9277                         if cp_dict is None:
9278                                 cp_dict = {}
9279                                 unordered[cp] = cp_dict
9280                                 for k in d:
9281                                         cp_dict[k] = set()
9282                         for k, v in d.iteritems():
9283                                 cp_dict[k].update(v)
9284                 pkgmap = [unordered[cp] for cp in sorted(unordered)]
9285
9286         for x in xrange(len(pkgmap)):
9287                 selected = pkgmap[x]["selected"]
9288                 if not selected:
9289                         continue
9290                 for mytype, mylist in pkgmap[x].iteritems():
9291                         if mytype == "selected":
9292                                 continue
9293                         mylist.difference_update(all_selected)
9294                 cp = portage.cpv_getkey(iter(selected).next())
9295                 for y in localtree.dep_match(cp):
9296                         if y not in pkgmap[x]["omitted"] and \
9297                                 y not in pkgmap[x]["selected"] and \
9298                                 y not in pkgmap[x]["protected"] and \
9299                                 y not in all_selected:
9300                                 pkgmap[x]["omitted"].add(y)
9301                 if global_unmerge and not pkgmap[x]["selected"]:
9302                         #avoid cluttering the preview printout with stuff that isn't getting unmerged
9303                         continue
9304                 if not (pkgmap[x]["protected"] or pkgmap[x]["omitted"]) and cp in syslist:
9305                         writemsg_level(colorize("BAD","\a\n\n!!! " + \
9306                                 "'%s' is part of your system profile.\n" % cp),
9307                                 level=logging.WARNING, noiselevel=-1)
9308                         writemsg_level(colorize("WARN","\a!!! Unmerging it may " + \
9309                                 "be damaging to your system.\n\n"),
9310                                 level=logging.WARNING, noiselevel=-1)
9311                         if clean_delay and "--pretend" not in myopts and "--ask" not in myopts:
9312                                 countdown(int(settings["EMERGE_WARNING_DELAY"]),
9313                                         colorize("UNMERGE_WARN", "Press Ctrl-C to Stop"))
9314                 if not quiet:
9315                         writemsg_level("\n %s\n" % (bold(cp),), noiselevel=-1)
9316                 else:
9317                         writemsg_level(bold(cp) + ": ", noiselevel=-1)
9318                 for mytype in ["selected","protected","omitted"]:
9319                         if not quiet:
9320                                 writemsg_level((mytype + ": ").rjust(14), noiselevel=-1)
9321                         if pkgmap[x][mytype]:
9322                                 sorted_pkgs = [portage.catpkgsplit(mypkg)[1:] for mypkg in pkgmap[x][mytype]]
9323                                 sorted_pkgs.sort(key=cmp_sort_key(portage.pkgcmp))
9324                                 for pn, ver, rev in sorted_pkgs:
9325                                         if rev == "r0":
9326                                                 myversion = ver
9327                                         else:
9328                                                 myversion = ver + "-" + rev
9329                                         if mytype == "selected":
9330                                                 writemsg_level(
9331                                                         colorize("UNMERGE_WARN", myversion + " "),
9332                                                         noiselevel=-1)
9333                                         else:
9334                                                 writemsg_level(
9335                                                         colorize("GOOD", myversion + " "), noiselevel=-1)
9336                         else:
9337                                 writemsg_level("none ", noiselevel=-1)
9338                         if not quiet:
9339                                 writemsg_level("\n", noiselevel=-1)
9340                 if quiet:
9341                         writemsg_level("\n", noiselevel=-1)
9342
9343         writemsg_level("\n>>> " + colorize("UNMERGE_WARN", "'Selected'") + \
9344                 " packages are slated for removal.\n")
9345         writemsg_level(">>> " + colorize("GOOD", "'Protected'") + \
9346                         " and " + colorize("GOOD", "'omitted'") + \
9347                         " packages will not be removed.\n\n")
9348
9349         if "--pretend" in myopts:
9350                 #we're done... return
9351                 return 0
9352         if "--ask" in myopts:
9353                 if userquery("Would you like to unmerge these packages?")=="No":
9354                         # enter pretend mode for correct formatting of results
9355                         myopts["--pretend"] = True
9356                         print
9357                         print "Quitting."
9358                         print
9359                         return 0
9360         #the real unmerging begins, after a short delay....
9361         if clean_delay and not autoclean:
9362                 countdown(int(settings["CLEAN_DELAY"]), ">>> Unmerging")
9363
9364         for x in xrange(len(pkgmap)):
9365                 for y in pkgmap[x]["selected"]:
9366                         writemsg_level(">>> Unmerging "+y+"...\n", noiselevel=-1)
9367                         emergelog(xterm_titles, "=== Unmerging... ("+y+")")
9368                         mysplit = y.split("/")
9369                         #unmerge...
9370                         retval = portage.unmerge(mysplit[0], mysplit[1], settings["ROOT"],
9371                                 mysettings, unmerge_action not in ["clean","prune"],
9372                                 vartree=vartree, ldpath_mtimes=ldpath_mtimes,
9373                                 scheduler=scheduler)
9374
9375                         if retval != os.EX_OK:
9376                                 emergelog(xterm_titles, " !!! unmerge FAILURE: "+y)
9377                                 if raise_on_error:
9378                                         raise UninstallFailure(retval)
9379                                 sys.exit(retval)
9380                         else:
9381                                 if clean_world and hasattr(sets["world"], "cleanPackage"):
9382                                         sets["world"].cleanPackage(vartree.dbapi, y)
9383                                 emergelog(xterm_titles, " >>> unmerge success: "+y)
9384         if clean_world and hasattr(sets["world"], "remove"):
9385                 for s in root_config.setconfig.active:
9386                         sets["world"].remove(SETPREFIX+s)
9387         return 1
9388
9389 def chk_updated_info_files(root, infodirs, prev_mtimes, retval):
9390
9391         if os.path.exists("/usr/bin/install-info"):
9392                 out = portage.output.EOutput()
9393                 regen_infodirs=[]
9394                 for z in infodirs:
9395                         if z=='':
9396                                 continue
9397                         inforoot=normpath(root+z)
9398                         if os.path.isdir(inforoot):
9399                                 infomtime = long(os.stat(inforoot).st_mtime)
9400                                 if inforoot not in prev_mtimes or \
9401                                         prev_mtimes[inforoot] != infomtime:
9402                                                 regen_infodirs.append(inforoot)
9403
9404                 if not regen_infodirs:
9405                         portage.writemsg_stdout("\n")
9406                         out.einfo("GNU info directory index is up-to-date.")
9407                 else:
9408                         portage.writemsg_stdout("\n")
9409                         out.einfo("Regenerating GNU info directory index...")
9410
9411                         dir_extensions = ("", ".gz", ".bz2")
9412                         icount=0
9413                         badcount=0
9414                         errmsg = ""
9415                         for inforoot in regen_infodirs:
9416                                 if inforoot=='':
9417                                         continue
9418
9419                                 if not os.path.isdir(inforoot) or \
9420                                         not os.access(inforoot, os.W_OK):
9421                                         continue
9422
9423                                 file_list = os.listdir(inforoot)
9424                                 file_list.sort()
9425                                 dir_file = os.path.join(inforoot, "dir")
9426                                 moved_old_dir = False
9427                                 processed_count = 0
9428                                 for x in file_list:
9429                                         if x.startswith(".") or \
9430                                                 os.path.isdir(os.path.join(inforoot, x)):
9431                                                 continue
9432                                         if x.startswith("dir"):
9433                                                 skip = False
9434                                                 for ext in dir_extensions:
9435                                                         if x == "dir" + ext or \
9436                                                                 x == "dir" + ext + ".old":
9437                                                                 skip = True
9438                                                                 break
9439                                                 if skip:
9440                                                         continue
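                                             # Before processing the first info file, move any existing
                                             # dir index files (dir, dir.gz, dir.bz2) out of the way so
                                             # that install-info regenerates the index from scratch.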
9441                                         if processed_count == 0:
9442                                                 for ext in dir_extensions:
9443                                                         try:
9444                                                                 os.rename(dir_file + ext, dir_file + ext + ".old")
9445                                                                 moved_old_dir = True
9446                                                         except EnvironmentError, e:
9447                                                                 if e.errno != errno.ENOENT:
9448                                                                         raise
9449                                                                 del e
9450                                         processed_count += 1
9451                                         myso=commands.getstatusoutput("LANG=C LANGUAGE=C /usr/bin/install-info --dir-file="+inforoot+"/dir "+inforoot+"/"+x)[1]
9452                                         existsstr="already exists, for file `"
9453                                         if myso!="":
9454                                                 if re.search(existsstr,myso):
9455                                                         # Already exists... Don't increment the count for this.
9456                                                         pass
9457                                                 elif myso[:44]=="install-info: warning: no info dir entry in ":
9458                                                         # This info file doesn't contain a DIR-header: install-info produces this
9459                                                         # (harmless) warning (the --quiet switch doesn't seem to work).
9460                                                         # Don't increment the count for this.
9461                                                         pass
9462                                                 else:
9463                                                         badcount=badcount+1
9464                                                         errmsg += myso + "\n"
9465                                         icount=icount+1
9466
9467                                 if moved_old_dir and not os.path.exists(dir_file):
9468                                         # We didn't generate a new dir file, so put the old file
9469                                         # back where it was originally found.
9470                                         for ext in dir_extensions:
9471                                                 try:
9472                                                         os.rename(dir_file + ext + ".old", dir_file + ext)
9473                                                 except EnvironmentError, e:
9474                                                         if e.errno != errno.ENOENT:
9475                                                                 raise
9476                                                         del e
9477
9478                                 # Clean up dir.old cruft so that it doesn't prevent
9479                                 # unmerge of otherwise empty directories.
9480                                 for ext in dir_extensions:
9481                                         try:
9482                                                 os.unlink(dir_file + ext + ".old")
9483                                         except EnvironmentError, e:
9484                                                 if e.errno != errno.ENOENT:
9485                                                         raise
9486                                                 del e
9487
9488                                 #update mtime so we can potentially avoid regenerating.
9489                                 prev_mtimes[inforoot] = long(os.stat(inforoot).st_mtime)
9490
9491                         if badcount:
9492                                 out.eerror("Processed %d info files; %d errors." % \
9493                                         (icount, badcount))
9494                                 writemsg_level(errmsg, level=logging.ERROR, noiselevel=-1)
9495                         else:
9496                                 if icount > 0:
9497                                         out.einfo("Processed %d info files." % (icount,))
9498
9499
9500 def display_news_notification(root_config, myopts):
9501         target_root = root_config.root
9502         trees = root_config.trees
9503         settings = trees["vartree"].settings
9504         portdb = trees["porttree"].dbapi
9505         vardb = trees["vartree"].dbapi
9506         NEWS_PATH = os.path.join("metadata", "news")
9507         UNREAD_PATH = os.path.join(target_root, NEWS_LIB_PATH, "news")
9508         newsReaderDisplay = False
9509         update = "--pretend" not in myopts
9510
9511         for repo in portdb.getRepositories():
9512                 unreadItems = checkUpdatedNewsItems(
9513                         portdb, vardb, NEWS_PATH, UNREAD_PATH, repo, update=update)
9514                 if unreadItems:
9515                         if not newsReaderDisplay:
9516                                 newsReaderDisplay = True
9517                                 print
9518                         print colorize("WARN", " * IMPORTANT:"),
9519                         print "%s news items need reading for repository '%s'." % (unreadItems, repo)
9520                         
9521         
9522         if newsReaderDisplay:
9523                 print colorize("WARN", " *"),
9524                 print "Use " + colorize("GOOD", "eselect news") + " to read news items."
9525                 print
9526
9527 def display_preserved_libs(vardbapi):
9528         MAX_DISPLAY = 3
9529
9530         # Ensure the registry is consistent with existing files.
9531         vardbapi.plib_registry.pruneNonExisting()
9532
9533         if vardbapi.plib_registry.hasEntries():
9534                 print
9535                 print colorize("WARN", "!!!") + " existing preserved libs:"
9536                 plibdata = vardbapi.plib_registry.getPreservedLibs()
9537                 linkmap = vardbapi.linkmap
9538                 consumer_map = {}
9539                 owners = {}
9540                 linkmap_broken = False
9541
9542                 try:
9543                         linkmap.rebuild()
9544                 except portage.exception.CommandNotFound, e:
9545                         writemsg_level("!!! Command Not Found: %s\n" % (e,),
9546                                 level=logging.ERROR, noiselevel=-1)
9547                         del e
9548                         linkmap_broken = True
9549                 else:
9550                         search_for_owners = set()
9551                         for cpv in plibdata:
9552                                 internal_plib_keys = set(linkmap._obj_key(f) \
9553                                         for f in plibdata[cpv])
9554                                 for f in plibdata[cpv]:
9555                                         if f in consumer_map:
9556                                                 continue
9557                                         consumers = []
9558                                         for c in linkmap.findConsumers(f):
9559                                                 # Filter out any consumers that are also preserved libs
9560                                                 # belonging to the same package as the provider.
9561                                                 if linkmap._obj_key(c) not in internal_plib_keys:
9562                                                         consumers.append(c)
9563                                         consumers.sort()
9564                                         consumer_map[f] = consumers
9565                                         search_for_owners.update(consumers[:MAX_DISPLAY+1])
9566
9567                         owners = vardbapi._owners.getFileOwnerMap(search_for_owners)
9568
9569                 for cpv in plibdata:
9570                         print colorize("WARN", ">>>") + " package: %s" % cpv
9571                         samefile_map = {}
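                             # Group alternate paths that resolve to the same object key so that
                             # each preserved object is listed once together with all of its paths.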
9572                         for f in plibdata[cpv]:
9573                                 obj_key = linkmap._obj_key(f)
9574                                 alt_paths = samefile_map.get(obj_key)
9575                                 if alt_paths is None:
9576                                         alt_paths = set()
9577                                         samefile_map[obj_key] = alt_paths
9578                                 alt_paths.add(f)
9579
9580                         for alt_paths in samefile_map.itervalues():
9581                                 alt_paths = sorted(alt_paths)
9582                                 for p in alt_paths:
9583                                         print colorize("WARN", " * ") + " - %s" % (p,)
9584                                 f = alt_paths[0]
9585                                 consumers = consumer_map.get(f, [])
9586                                 for c in consumers[:MAX_DISPLAY]:
9587                                         print colorize("WARN", " * ") + "     used by %s (%s)" % \
9588                                                 (c, ", ".join(x.mycpv for x in owners.get(c, [])))
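                                     # When exactly one consumer exceeds the display limit, show it
                                     # directly instead of printing a "used by 1 other files" summary.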
9589                                 if len(consumers) == MAX_DISPLAY + 1:
9590                                         print colorize("WARN", " * ") + "     used by %s (%s)" % \
9591                                                 (consumers[MAX_DISPLAY], ", ".join(x.mycpv \
9592                                                 for x in owners.get(consumers[MAX_DISPLAY], [])))
9593                                 elif len(consumers) > MAX_DISPLAY:
9594                                         print colorize("WARN", " * ") + "     used by %d other files" % (len(consumers) - MAX_DISPLAY)
9595                 print "Use " + colorize("GOOD", "emerge @preserved-rebuild") + " to rebuild packages using these libraries"
9596
9597
9598 def _flush_elog_mod_echo():
9599         """
9600         Dump the mod_echo output now so that our other
9601         notifications are shown last.
9602         @rtype: bool
9603         @returns: True if messages were shown, False otherwise.
9604         """
9605         messages_shown = False
9606         try:
9607                 from portage.elog import mod_echo
9608         except ImportError:
9609                 pass # happens during downgrade to a version without the module
9610         else:
9611                 messages_shown = bool(mod_echo._items)
9612                 mod_echo.finalize()
9613         return messages_shown
9614
9615 def post_emerge(root_config, myopts, mtimedb, retval):
9616         """
9617         Misc. things to run at the end of a merge session.
9618         
9619         Update Info Files
9620         Update Config Files
9621         Update News Items
9622         Commit mtimeDB
9623         Display preserved libs warnings
9624         Exit Emerge
9625
9626         @param root_config: The root configuration, providing access to the target ROOT's package databases
9627         @type root_config: RootConfig instance
9628         @param mtimedb: The mtimeDB to store data needed across merge invocations
9629         @type mtimedb: MtimeDB class instance
9630         @param retval: Emerge's return value
9631         @type retval: Int
9632         @rtype: None
9633         @returns: None. This function always calls
9634                 sys.exit(retval) rather than returning.
9635         """
9636
9637         target_root = root_config.root
9638         trees = { target_root : root_config.trees }
9639         vardbapi = trees[target_root]["vartree"].dbapi
9640         settings = vardbapi.settings
9641         info_mtimes = mtimedb["info"]
9642
9643         # Load the most current variables from ${ROOT}/etc/profile.env
9644         settings.unlock()
9645         settings.reload()
9646         settings.regenerate()
9647         settings.lock()
9648
9649         config_protect = settings.get("CONFIG_PROTECT","").split()
9650         infodirs = settings.get("INFOPATH","").split(":") + \
9651                 settings.get("INFODIR","").split(":")
9652
9653         os.chdir("/")
9654
9655         if retval == os.EX_OK:
9656                 exit_msg = " *** exiting successfully."
9657         else:
9658                 exit_msg = " *** exiting unsuccessfully with status '%s'." % retval
9659         emergelog("notitles" not in settings.features, exit_msg)
9660
9661         _flush_elog_mod_echo()
9662
9663         counter_hash = settings.get("PORTAGE_COUNTER_HASH")
9664         if "--pretend" in myopts or (counter_hash is not None and \
9665                 counter_hash == vardbapi._counter_hash()):
9666                 display_news_notification(root_config, myopts)
9667                 # If vdb state has not changed then there's nothing else to do.
9668                 sys.exit(retval)
9669
9670         vdb_path = os.path.join(target_root, portage.VDB_PATH)
9671         portage.util.ensure_dirs(vdb_path)
9672         vdb_lock = None
9673         if os.access(vdb_path, os.W_OK) and "--pretend" not in myopts:
9674                 vdb_lock = portage.locks.lockdir(vdb_path)
9675
9676         if vdb_lock:
9677                 try:
9678                         if "noinfo" not in settings.features:
9679                                 chk_updated_info_files(target_root,
9680                                         infodirs, info_mtimes, retval)
9681                         mtimedb.commit()
9682                 finally:
9683                         if vdb_lock:
9684                                 portage.locks.unlockdir(vdb_lock)
9685
9686         chk_updated_cfg_files(target_root, config_protect)
9687         
9688         display_news_notification(root_config, myopts)
9689         if retval in (None, os.EX_OK) or "--pretend" not in myopts:
9690                 display_preserved_libs(vardbapi)
9691
9692         sys.exit(retval)
9693
9694
9695 def chk_updated_cfg_files(target_root, config_protect):
9696         if config_protect:
9697                 #number of directories with some protect files in them
9698                 procount=0
9699                 for x in config_protect:
9700                         x = os.path.join(target_root, x.lstrip(os.path.sep))
9701                         if not os.access(x, os.W_OK):
9702                                 # Avoid Permission denied errors generated
9703                                 # later by `find`.
9704                                 continue
9705                         try:
9706                                 mymode = os.lstat(x).st_mode
9707                         except OSError:
9708                                 continue
9709                         if stat.S_ISLNK(mymode):
9710                                 # We want to treat it like a directory if it
9711                                 # is a symlink to an existing directory.
9712                                 try:
9713                                         real_mode = os.stat(x).st_mode
9714                                         if stat.S_ISDIR(real_mode):
9715                                                 mymode = real_mode
9716                                 except OSError:
9717                                         pass
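                             # Build a find command for ._cfg????_* update candidates: search the
                             # whole tree (pruning hidden directories) when the target is a directory,
                             # otherwise check only the parent directory for updates to that one file.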
9718                         if stat.S_ISDIR(mymode):
9719                                 mycommand = "find '%s' -name '.*' -type d -prune -o -name '._cfg????_*'" % x
9720                         else:
9721                                 mycommand = "find '%s' -maxdepth 1 -name '._cfg????_%s'" % \
9722                                         os.path.split(x.rstrip(os.path.sep))
9723                         mycommand += " ! -name '.*~' ! -iname '.*.bak' -print0"
9724                         a = commands.getstatusoutput(mycommand)
9725                         if a[0] != 0:
9726                                 sys.stderr.write(" %s error scanning '%s': " % (bad("*"), x))
9727                                 sys.stderr.flush()
9728                                 # Show the error message alone, sending stdout to /dev/null.
9729                                 os.system(mycommand + " 1>/dev/null")
9730                         else:
9731                                 files = a[1].split('\0')
9732                                 # split always produces an empty string as the last element
9733                                 if files and not files[-1]:
9734                                         del files[-1]
9735                                 if files:
9736                                         procount += 1
9737                                         print "\n"+colorize("WARN", " * IMPORTANT:"),
9738                                         if stat.S_ISDIR(mymode):
9739                                                  print "%d config files in '%s' need updating." % \
9740                                                         (len(files), x)
9741                                         else:
9742                                                 print "config file '%s' needs updating." % x
9743
9744                 if procount:
9745                         print " "+yellow("*")+" See the "+colorize("INFORM","CONFIGURATION FILES")+ \
9746                                 " section of the " + bold("emerge")
9747                         print " "+yellow("*")+" man page to learn how to update config files."
9748
9749 def checkUpdatedNewsItems(portdb, vardb, NEWS_PATH, UNREAD_PATH, repo_id,
9750         update=False):
9751         """
9752         Examines news items in repodir + '/' + NEWS_PATH and attempts to find
9753         unread items. Returns the number of unread (yet relevant) items.
9754 
9755         @param portdb: a portage tree database
9756         @type portdb: portdbapi
9757         @param vardb: an installed package database
9758         @type vardb: vardbapi
9759         @param NEWS_PATH: path of the news directory, relative to the repository
9760         @type NEWS_PATH: String
9761         @param UNREAD_PATH: path where the list of unread news items is stored
9762         @type UNREAD_PATH: String
9763         @param repo_id: identifier of the repository being examined
9764         @type repo_id: String
9765         @rtype: Integer
9766         @returns:
9767         1.  The number of unread but relevant news items.
9768         
9769         """
9770         from portage.news import NewsManager
9771         manager = NewsManager(portdb, vardb, NEWS_PATH, UNREAD_PATH)
9772         return manager.getUnreadItems( repo_id, update=update )
9773
9774 def insert_category_into_atom(atom, category):
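             # Insert the category in front of the first alphanumeric character of
             # the atom; e.g. (illustrative values) atom ">=foo-1.0" with category
             # "sys-apps" becomes ">=sys-apps/foo-1.0". Returns None if the atom
             # contains no alphanumeric character.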
9775         alphanum = re.search(r'\w', atom)
9776         if alphanum:
9777                 ret = atom[:alphanum.start()] + "%s/" % category + \
9778                         atom[alphanum.start():]
9779         else:
9780                 ret = None
9781         return ret
9782
9783 def is_valid_package_atom(x):
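             # Atoms without a category are validated against a dummy "cat/"
             # category, so bare names like "foo" or ">=foo-1.0" are accepted.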
9784         if "/" not in x:
9785                 alphanum = re.search(r'\w', x)
9786                 if alphanum:
9787                         x = x[:alphanum.start()] + "cat/" + x[alphanum.start():]
9788         return portage.isvalidatom(x)
9789
9790 def show_blocker_docs_link():
9791         print
9792         print "For more information about " + bad("Blocked Packages") + ", please refer to the following"
9793         print "section of the Gentoo Linux x86 Handbook (architecture is irrelevant):"
9794         print
9795         print "http://www.gentoo.org/doc/en/handbook/handbook-x86.xml?full=1#blocked"
9796         print
9797
9798 def show_mask_docs():
9799         print "For more information, see the MASKED PACKAGES section in the emerge"
9800         print "man page or refer to the Gentoo Handbook."
9801
9802 def action_sync(settings, trees, mtimedb, myopts, myaction):
9803         xterm_titles = "notitles" not in settings.features
9804         emergelog(xterm_titles, " === sync")
9805         portdb = trees[settings["ROOT"]]["porttree"].dbapi
9806         myportdir = portdb.porttree_root
9807         out = portage.output.EOutput()
9808         if not myportdir:
9809                 sys.stderr.write("!!! PORTDIR is undefined.  Is /etc/make.globals missing?\n")
9810                 sys.exit(1)
9811         if myportdir[-1]=="/":
9812                 myportdir=myportdir[:-1]
9813         try:
9814                 st = os.stat(myportdir)
9815         except OSError:
9816                 st = None
9817         if st is None:
9818                 print ">>>",myportdir,"not found, creating it."
9819                 os.makedirs(myportdir,0755)
9820                 st = os.stat(myportdir)
9821
9822         spawn_kwargs = {}
9823         spawn_kwargs["env"] = settings.environ()
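             # With FEATURES=usersync and sufficient privileges, drop to the
             # uid/gid that owns the tree so the sync matches existing ownership.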
9824         if 'usersync' in settings.features and \
9825                 portage.data.secpass >= 2 and \
9826                 (st.st_uid != os.getuid() and st.st_mode & 0700 or \
9827                 st.st_gid != os.getgid() and st.st_mode & 0070):
9828                 try:
9829                         homedir = pwd.getpwuid(st.st_uid).pw_dir
9830                 except KeyError:
9831                         pass
9832                 else:
9833                         # Drop privileges when syncing, in order to match
9834                         # existing uid/gid settings.
9835                         spawn_kwargs["uid"]    = st.st_uid
9836                         spawn_kwargs["gid"]    = st.st_gid
9837                         spawn_kwargs["groups"] = [st.st_gid]
9838                         spawn_kwargs["env"]["HOME"] = homedir
9839                         umask = 0002
9840                         if not st.st_mode & 0020:
9841                                 umask = umask | 0020
9842                         spawn_kwargs["umask"] = umask
9843
9844         syncuri = settings.get("SYNC", "").strip()
9845         if not syncuri:
9846                 writemsg_level("!!! SYNC is undefined. Is /etc/make.globals missing?\n",
9847                         noiselevel=-1, level=logging.ERROR)
9848                 return 1
9849
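             # Detect whether the tree is a VCS checkout; git checkouts are updated
             # with git pull below, and the rsync branch aborts rather than clobber
             # a checkout that is under revision control.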
9850         vcs_dirs = frozenset([".git", ".svn", "CVS", ".hg"])
9851         vcs_dirs = vcs_dirs.intersection(os.listdir(myportdir))
9852
9853         os.umask(0022)
9854         dosyncuri = syncuri
9855         updatecache_flg = False
9856         if myaction == "metadata":
9857                 print "skipping sync"
9858                 updatecache_flg = True
9859         elif ".git" in vcs_dirs:
9860                 # Update existing git repository, and ignore the syncuri. We are
9861                 # going to trust the user and assume that the user is in the branch
9862                 # that he/she wants updated. We'll let the user manage branches with
9863                 # git directly.
9864                 if portage.process.find_binary("git") is None:
9865                         msg = ["Command not found: git",
9866                         "Type \"emerge dev-util/git\" to enable git support."]
9867                         for l in msg:
9868                                 writemsg_level("!!! %s\n" % l,
9869                                         level=logging.ERROR, noiselevel=-1)
9870                         return 1
9871                 msg = ">>> Starting git pull in %s..." % myportdir
9872                 emergelog(xterm_titles, msg )
9873                 writemsg_level(msg + "\n")
9874                 exitcode = portage.process.spawn_bash("cd %s ; git pull" % \
9875                         (portage._shell_quote(myportdir),), **spawn_kwargs)
9876                 if exitcode != os.EX_OK:
9877                         msg = "!!! git pull error in %s." % myportdir
9878                         emergelog(xterm_titles, msg)
9879                         writemsg_level(msg + "\n", level=logging.ERROR, noiselevel=-1)
9880                         return exitcode
9881                 msg = ">>> Git pull in %s successful" % myportdir
9882                 emergelog(xterm_titles, msg)
9883                 writemsg_level(msg + "\n")
9884                 exitcode = git_sync_timestamps(settings, myportdir)
9885                 if exitcode == os.EX_OK:
9886                         updatecache_flg = True
9887         elif syncuri[:8]=="rsync://":
9888                 for vcs_dir in vcs_dirs:
9889                         writemsg_level(("!!! %s appears to be under revision " + \
9890                                 "control (contains %s).\n!!! Aborting rsync sync.\n") % \
9891                                 (myportdir, vcs_dir), level=logging.ERROR, noiselevel=-1)
9892                         return 1
9893                 if not os.path.exists("/usr/bin/rsync"):
9894                         print "!!! /usr/bin/rsync does not exist, so rsync support is disabled."
9895                         print "!!! Type \"emerge net-misc/rsync\" to enable rsync support."
9896                         sys.exit(1)
9897                 mytimeout=180
9898
9899                 rsync_opts = []
9900                 if settings["PORTAGE_RSYNC_OPTS"] == "":
9901                         portage.writemsg("PORTAGE_RSYNC_OPTS empty or unset, using hardcoded defaults\n")
9902                         rsync_opts.extend([
9903                                 "--recursive",    # Recurse directories
9904                                 "--links",        # Consider symlinks
9905                                 "--safe-links",   # Ignore links outside of tree
9906                                 "--perms",        # Preserve permissions
9907                                 "--times",        # Preserve mod times
9908                                 "--compress",     # Compress the data transmitted
9909                                 "--force",        # Force deletion on non-empty dirs
9910                                 "--whole-file",   # Don't do block transfers, only entire files
9911                                 "--delete",       # Delete files that aren't in the master tree
9912                                 "--stats",        # Show final statistics about what was transferred
9913                                 "--timeout="+str(mytimeout), # IO timeout if not done in X seconds
9914                                 "--exclude=/distfiles",   # Exclude distfiles from consideration
9915                                 "--exclude=/local",       # Exclude local     from consideration
9916                                 "--exclude=/packages",    # Exclude packages  from consideration
9917                         ])
9918
9919                 else:
9920                         # The below validation is not needed when using the above hardcoded
9921                         # defaults.
9922
9923                         portage.writemsg("Using PORTAGE_RSYNC_OPTS instead of hardcoded defaults\n", 1)
9924                         rsync_opts.extend(
9925                                 shlex.split(settings.get("PORTAGE_RSYNC_OPTS","")))
9926                         for opt in ("--recursive", "--times"):
9927                                 if opt not in rsync_opts:
9928                                         portage.writemsg(yellow("WARNING:") + " adding required option " + \
9929                                         "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
9930                                         rsync_opts.append(opt)
9931         
9932                         for exclude in ("distfiles", "local", "packages"):
9933                                 opt = "--exclude=/%s" % exclude
9934                                 if opt not in rsync_opts:
9935                                         portage.writemsg(yellow("WARNING:") + \
9936                                         " adding required option %s not included in "  % opt + \
9937                                         "PORTAGE_RSYNC_OPTS (can be overridden with --exclude='!')\n")
9938                                         rsync_opts.append(opt)
9939         
9940                         if syncuri.rstrip("/").endswith(".gentoo.org/gentoo-portage"):
9941                                 def rsync_opt_startswith(opt_prefix):
9942                                         for x in rsync_opts:
9943                                                 if x.startswith(opt_prefix):
9944                                                         return True
9945                                         return False
9946
9947                                 if not rsync_opt_startswith("--timeout="):
9948                                         rsync_opts.append("--timeout=%d" % mytimeout)
9949
9950                                 for opt in ("--compress", "--whole-file"):
9951                                         if opt not in rsync_opts:
9952                                                 portage.writemsg(yellow("WARNING:") + " adding required option " + \
9953                                                 "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
9954                                                 rsync_opts.append(opt)
9955
9956                 if "--quiet" in myopts:
9957                         rsync_opts.append("--quiet")    # Shut up a lot
9958                 else:
9959                         rsync_opts.append("--verbose")  # Print filelist
9960
9961                 if "--verbose" in myopts:
9962                         rsync_opts.append("--progress")  # Progress meter for each file
9963
9964                 if "--debug" in myopts:
9965                         rsync_opts.append("--checksum") # Force checksum on all files
9966
9967                 # Real local timestamp file.
9968                 servertimestampfile = os.path.join(
9969                         myportdir, "metadata", "timestamp.chk")
9970
9971                 content = portage.util.grabfile(servertimestampfile)
9972                 mytimestamp = 0
9973                 if content:
9974                         try:
9975                                 mytimestamp = time.mktime(time.strptime(content[0],
9976                                         "%a, %d %b %Y %H:%M:%S +0000"))
9977                         except (OverflowError, ValueError):
9978                                 pass
9979                 del content
9980
9981                 try:
9982                         rsync_initial_timeout = \
9983                                 int(settings.get("PORTAGE_RSYNC_INITIAL_TIMEOUT", "15"))
9984                 except ValueError:
9985                         rsync_initial_timeout = 15
9986
9987                 try:
9988                         maxretries=int(settings["PORTAGE_RSYNC_RETRIES"])
9989                 except SystemExit, e:
9990                         raise # Needed else can't exit
9991                 except:
9992                         maxretries=3 #default number of retries
9993
9994                 retries=0
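                     # Split the rsync URI into an optional user@ prefix, the host
                     # name, and an optional :port suffix.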
9995                 user_name, hostname, port = re.split(
9996                         "rsync://([^:/]+@)?([^:/]*)(:[0-9]+)?", syncuri, maxsplit=3)[1:4]
9997                 if port is None:
9998                         port=""
9999                 if user_name is None:
10000                         user_name=""
10001                 updatecache_flg=True
10002                 all_rsync_opts = set(rsync_opts)
10003                 extra_rsync_opts = shlex.split(
10004                         settings.get("PORTAGE_RSYNC_EXTRA_OPTS",""))
10005                 all_rsync_opts.update(extra_rsync_opts)
10006                 family = socket.AF_INET
10007                 if "-4" in all_rsync_opts or "--ipv4" in all_rsync_opts:
10008                         family = socket.AF_INET
10009                 elif socket.has_ipv6 and \
10010                         ("-6" in all_rsync_opts or "--ipv6" in all_rsync_opts):
10011                         family = socket.AF_INET6
10012                 ips=[]
10013                 SERVER_OUT_OF_DATE = -1
10014                 EXCEEDED_MAX_RETRIES = -2
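                      # The two sentinel codes above are internal; rsync never returns
                      # them. The loop below rotates through the resolved server
                      # addresses, retrying until the sync succeeds, the server proves
                      # out of date, or the retry limit is exceeded.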
10015                 while (1):
10016                         if ips:
10017                                 del ips[0]
10018                         if ips==[]:
10019                                 try:
10020                                         for addrinfo in socket.getaddrinfo(
10021                                                 hostname, None, family, socket.SOCK_STREAM):
10022                                                 if socket.has_ipv6 and addrinfo[0] == socket.AF_INET6:
10023                                                         # IPv6 addresses need to be enclosed in square brackets
10024                                                         ips.append("[%s]" % addrinfo[4][0])
10025                                                 else:
10026                                                         ips.append(addrinfo[4][0])
10027                                         from random import shuffle
10028                                         shuffle(ips)
10029                                 except SystemExit, e:
10030                                         raise # Needed else can't exit
10031                                 except Exception, e:
10032                                         print "Notice:",str(e)
10033                                         dosyncuri=syncuri
10034
10035                         if ips:
10036                                 try:
10037                                         dosyncuri = syncuri.replace(
10038                                                 "//" + user_name + hostname + port + "/",
10039                                                 "//" + user_name + ips[0] + port + "/", 1)
10040                                 except SystemExit, e:
10041                                         raise # Needed else can't exit
10042                                 except Exception, e:
10043                                         print "Notice:",str(e)
10044                                         dosyncuri=syncuri
10045
10046                         if (retries==0):
10047                                 if "--ask" in myopts:
10048                                         if userquery("Do you want to sync your Portage tree with the mirror at\n" + blue(dosyncuri) + bold("?"))=="No":
10049                                                 print
10050                                                 print "Quitting."
10051                                                 print
10052                                                 sys.exit(0)
10053                                 emergelog(xterm_titles, ">>> Starting rsync with " + dosyncuri)
10054                                 if "--quiet" not in myopts:
10055                                         print ">>> Starting rsync with "+dosyncuri+"..."
10056                         else:
10057                                 emergelog(xterm_titles,
10058                                         ">>> Starting retry %d of %d with %s" % \
10059                                                 (retries,maxretries,dosyncuri))
10060                                 print "\n\n>>> Starting retry %d of %d with %s" % (retries,maxretries,dosyncuri)
10061
10062                         if mytimestamp != 0 and "--quiet" not in myopts:
10063                                 print ">>> Checking server timestamp ..."
10064
10065                         rsynccommand = ["/usr/bin/rsync"] + rsync_opts + extra_rsync_opts
10066
10067                         if "--debug" in myopts:
10068                                 print rsynccommand
10069
10070                         exitcode = os.EX_OK
10071                         servertimestamp = 0
10072                         # Even if there's no timestamp available locally, fetch the
10073                         # timestamp anyway as an initial probe to verify that the server is
10074                         # responsive.  This protects us from hanging indefinitely on a
10075                         # connection attempt to an unresponsive server which rsync's
10076                         # --timeout option does not prevent.
10077                         if True:
10078                                 # Temporary file for remote server timestamp comparison.
10079                                 from tempfile import mkstemp
10080                                 fd, tmpservertimestampfile = mkstemp()
10081                                 os.close(fd)
10082                                 mycommand = rsynccommand[:]
10083                                 mycommand.append(dosyncuri.rstrip("/") + \
10084                                         "/metadata/timestamp.chk")
10085                                 mycommand.append(tmpservertimestampfile)
10086                                 content = None
10087                                 mypids = []
10088                                 try:
10089                                         def timeout_handler(signum, frame):
10090                                                 raise portage.exception.PortageException("timed out")
10091                                         signal.signal(signal.SIGALRM, timeout_handler)
10092                                         # Timeout here in case the server is unresponsive.  The
10093                                         # --timeout rsync option doesn't apply to the initial
10094                                         # connection attempt.
10095                                         if rsync_initial_timeout:
10096                                                 signal.alarm(rsync_initial_timeout)
10097                                         try:
10098                                                 mypids.extend(portage.process.spawn(
10099                                                         mycommand, env=settings.environ(), returnpid=True))
10100                                                 exitcode = os.waitpid(mypids[0], 0)[1]
10101                                                 content = portage.grabfile(tmpservertimestampfile)
10102                                         finally:
10103                                                 if rsync_initial_timeout:
10104                                                         signal.alarm(0)
10105                                                 try:
10106                                                         os.unlink(tmpservertimestampfile)
10107                                                 except OSError:
10108                                                         pass
10109                                 except portage.exception.PortageException, e:
10110                                         # timed out
10111                                         print e
10112                                         del e
10113                                         if mypids and os.waitpid(mypids[0], os.WNOHANG) == (0,0):
10114                                                 os.kill(mypids[0], signal.SIGTERM)
10115                                                 os.waitpid(mypids[0], 0)
10116                                         # This is the same code rsync uses for timeout.
10117                                         exitcode = 30
10118                                 else:
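                                              # Normalize the raw os.waitpid() status: the exit code is in
                                              # the high byte and a terminating signal in the low byte, so
                                              # a failure always ends up as a nonzero exit code.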
10119                                         if exitcode != os.EX_OK:
10120                                                 if exitcode & 0xff:
10121                                                         exitcode = (exitcode & 0xff) << 8
10122                                                 else:
10123                                                         exitcode = exitcode >> 8
10124                                 if mypids:
10125                                         portage.process.spawned_pids.remove(mypids[0])
10126                                 if content:
10127                                         try:
10128                                                 servertimestamp = time.mktime(time.strptime(
10129                                                         content[0], "%a, %d %b %Y %H:%M:%S +0000"))
10130                                         except (OverflowError, ValueError):
10131                                                 pass
10132                                 del mycommand, mypids, content
10133                         if exitcode == os.EX_OK:
10134                                 if (servertimestamp != 0) and (servertimestamp == mytimestamp):
10135                                         emergelog(xterm_titles,
10136                                                 ">>> Cancelling sync -- Already current.")
10137                                         print
10138                                         print ">>>"
10139                                         print ">>> Timestamps on the server and in the local repository are the same."
10140                                         print ">>> Cancelling all further sync action. You are already up to date."
10141                                         print ">>>"
10142                                         print ">>> In order to force sync, remove '%s'." % servertimestampfile
10143                                         print ">>>"
10144                                         print
10145                                         sys.exit(0)
10146                                 elif (servertimestamp != 0) and (servertimestamp < mytimestamp):
10147                                         emergelog(xterm_titles,
10148                                                 ">>> Server out of date: %s" % dosyncuri)
10149                                         print
10150                                         print ">>>"
10151                                         print ">>> SERVER OUT OF DATE: %s" % dosyncuri
10152                                         print ">>>"
10153                                         print ">>> In order to force sync, remove '%s'." % servertimestampfile
10154                                         print ">>>"
10155                                         print
10156                                         exitcode = SERVER_OUT_OF_DATE
10157                                 elif (servertimestamp == 0) or (servertimestamp > mytimestamp):
10158                                         # actual sync
10159                                         mycommand = rsynccommand + [dosyncuri+"/", myportdir]
10160                                         exitcode = portage.process.spawn(mycommand, **spawn_kwargs)
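                                              # Stop retrying on success or on rsync errors that another
                                              # attempt is unlikely to fix (e.g. syntax or file I/O errors).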
10161                                         if exitcode in [0,1,3,4,11,14,20,21]:
10162                                                 break
10163                         elif exitcode in [1,3,4,11,14,20,21]:
10164                                 break
10165                         else:
10166                                 # Code 2 indicates protocol incompatibility, which is expected
10167                                 # for servers with protocol < 29 that don't support
10168                                 # --prune-empty-directories.  Retry for a server that supports
10169                                 # at least rsync protocol version 29 (>=rsync-2.6.4).
10170                                 pass
10171
10172                         retries=retries+1
10173
10174                         if retries<=maxretries:
10175                                 print ">>> Retrying..."
10176                                 time.sleep(11)
10177                         else:
10178                                 # over retries
10179                                 # exit loop
10180                                 updatecache_flg=False
10181                                 exitcode = EXCEEDED_MAX_RETRIES
10182                                 break
10183
10184                 if (exitcode==0):
10185                         emergelog(xterm_titles, "=== Sync completed with %s" % dosyncuri)
10186                 elif exitcode == SERVER_OUT_OF_DATE:
10187                         sys.exit(1)
10188                 elif exitcode == EXCEEDED_MAX_RETRIES:
10189                         sys.stderr.write(
10190                                 ">>> Exceeded PORTAGE_RSYNC_RETRIES: %s\n" % maxretries)
10191                         sys.exit(1)
10192                 elif (exitcode>0):
10193                         msg = []
10194                         if exitcode==1:
10195                                 msg.append("Rsync has reported that there is a syntax error. Please ensure")
10196                                 msg.append("that your SYNC statement is proper.")
10197                                 msg.append("SYNC=" + settings["SYNC"])
10198                         elif exitcode==11:
10199                                 msg.append("Rsync has reported that there is a File IO error. Normally")
10200                                 msg.append("this means your disk is full, but it can also be caused by corruption")
10201                                 msg.append("on the filesystem that contains PORTDIR. Please investigate")
10202                                 msg.append("and try again after the problem has been fixed.")
10203                                 msg.append("PORTDIR=" + settings["PORTDIR"])
10204                         elif exitcode==20:
10205                                 msg.append("Rsync was killed before it finished.")
10206                         else:
10207                                 msg.append("Rsync has not successfully finished. It is recommended that you keep")
10208                                 msg.append("trying or that you use the 'emerge-webrsync' option if you are unable")
10209                                 msg.append("to use rsync due to firewall or other restrictions. This should be a")
10210                                 msg.append("temporary problem unless complications exist with your network")
10211                                 msg.append("(and possibly your system's filesystem) configuration.")
10212                         for line in msg:
10213                                 out.eerror(line)
10214                         sys.exit(exitcode)
10215         elif syncuri[:6]=="cvs://":
10216                 if not os.path.exists("/usr/bin/cvs"):
10217                         print "!!! /usr/bin/cvs does not exist, so CVS support is disabled."
10218                         print "!!! Type \"emerge dev-util/cvs\" to enable CVS support."
10219                         sys.exit(1)
10220                 cvsroot=syncuri[6:]
10221                 cvsdir=os.path.dirname(myportdir)
10222                 if not os.path.exists(myportdir+"/CVS"):
10223                         #initial checkout
10224                         print ">>> Starting initial cvs checkout with "+syncuri+"..."
10225                         if os.path.exists(cvsdir+"/gentoo-x86"):
10226                                 print "!!! existing",cvsdir+"/gentoo-x86 directory; exiting."
10227                                 sys.exit(1)
10228                         try:
10229                                 os.rmdir(myportdir)
10230                         except OSError, e:
10231                                 if e.errno != errno.ENOENT:
10232                                         sys.stderr.write(
10233                                                 "!!! existing '%s' directory; exiting.\n" % myportdir)
10234                                         sys.exit(1)
10235                                 del e
10236                         if portage.spawn("cd "+cvsdir+"; cvs -z0 -d "+cvsroot+" co -P gentoo-x86",settings,free=1):
10237                                 print "!!! cvs checkout error; exiting."
10238                                 sys.exit(1)
10239                         os.rename(os.path.join(cvsdir, "gentoo-x86"), myportdir)
10240                 else:
10241                         #cvs update
10242                         print ">>> Starting cvs update with "+syncuri+"..."
10243                         retval = portage.process.spawn_bash(
10244                                 "cd %s; cvs -z0 -q update -dP" % \
10245                                 (portage._shell_quote(myportdir),), **spawn_kwargs)
10246                         if retval != os.EX_OK:
10247                                 sys.exit(retval)
10248                 dosyncuri = syncuri
10249         else:
10250                 writemsg_level("!!! Unrecognized protocol: SYNC='%s'\n" % (syncuri,),
10251                         noiselevel=-1, level=logging.ERROR)
10252                 return 1
10253
10254         if updatecache_flg and  \
10255                 myaction != "metadata" and \
10256                 "metadata-transfer" not in settings.features:
10257                 updatecache_flg = False
10258
10259         # Reload the whole config from scratch.
10260         settings, trees, mtimedb = load_emerge_config(trees=trees)
10261         root_config = trees[settings["ROOT"]]["root_config"]
10262         portdb = trees[settings["ROOT"]]["porttree"].dbapi
10263
10264         if updatecache_flg and \
10265                 os.path.exists(os.path.join(myportdir, 'metadata', 'cache')):
10266
10267                 # Only update cache for myportdir since that's
10268                 # the only one that's been synced here.
10269                 action_metadata(settings, portdb, myopts, porttrees=[myportdir])
10270
10271         if portage._global_updates(trees, mtimedb["updates"]):
10272                 mtimedb.commit()
10273                 # Reload the whole config from scratch.
10274                 settings, trees, mtimedb = load_emerge_config(trees=trees)
10275                 portdb = trees[settings["ROOT"]]["porttree"].dbapi
10276                 root_config = trees[settings["ROOT"]]["root_config"]
10277
10278         mybestpv = portdb.xmatch("bestmatch-visible",
10279                 portage.const.PORTAGE_PACKAGE_ATOM)
10280         mypvs = portage.best(
10281                 trees[settings["ROOT"]]["vartree"].dbapi.match(
10282                 portage.const.PORTAGE_PACKAGE_ATOM))
10283
10284         chk_updated_cfg_files("/", settings.get("CONFIG_PROTECT","").split())
10285
10286         if myaction != "metadata":
10287                 if os.access(portage.USER_CONFIG_PATH + "/bin/post_sync", os.X_OK):
10288                         retval = portage.process.spawn(
10289                                 [os.path.join(portage.USER_CONFIG_PATH, "bin", "post_sync"),
10290                                 dosyncuri], env=settings.environ())
10291                         if retval != os.EX_OK:
10292                                 print red(" * ")+bold("spawn failed for " + portage.USER_CONFIG_PATH + "/bin/post_sync")
10293
10294         if (mybestpv != mypvs) and "--quiet" not in myopts:
10295                 print
10296                 print red(" * ")+bold("An update to portage is available.")+" It is _highly_ recommended"
10297                 print red(" * ")+"that you update portage now, before any other packages are updated."
10298                 print
10299                 print red(" * ")+"To update portage, run 'emerge portage' now."
10300                 print
10301         
10302         display_news_notification(root_config, myopts)
10303         return os.EX_OK
10304
10305 def git_sync_timestamps(settings, portdir):
10306         """
10307         Since git doesn't preserve timestamps, synchronize timestamps between
10308         entries and ebuilds/eclasses. Assume the cache has the correct timestamp
10309         for a given file as long as the file in the working tree is not modified
10310         (relative to HEAD).
10311         """
10312         cache_dir = os.path.join(portdir, "metadata", "cache")
10313         if not os.path.isdir(cache_dir):
10314                 return os.EX_OK
10315         writemsg_level(">>> Synchronizing timestamps...\n")
10316
10317         from portage.cache.cache_errors import CacheError
10318         try:
10319                 cache_db = settings.load_best_module("portdbapi.metadbmodule")(
10320                         portdir, "metadata/cache", portage.auxdbkeys[:], readonly=True)
10321         except CacheError, e:
10322                 writemsg_level("!!! Unable to instantiate cache: %s\n" % (e,),
10323                         level=logging.ERROR, noiselevel=-1)
10324                 return 1
10325
10326         ec_dir = os.path.join(portdir, "eclass")
10327         try:
10328                 ec_names = set(f[:-7] for f in os.listdir(ec_dir) \
10329                         if f.endswith(".eclass"))
10330         except OSError, e:
10331                 writemsg_level("!!! Unable to list eclasses: %s\n" % (e,),
10332                         level=logging.ERROR, noiselevel=-1)
10333                 return 1
10334
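              # Ask git which files in the working tree are modified relative to
              # HEAD; cached timestamps for those files cannot be trusted.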
10335         args = [portage.const.BASH_BINARY, "-c",
10336                 "cd %s && git diff-index --name-only --diff-filter=M HEAD" % \
10337                 portage._shell_quote(portdir)]
10338         import subprocess
10339         proc = subprocess.Popen(args, stdout=subprocess.PIPE)
10340         modified_files = set(l.rstrip("\n") for l in proc.stdout)
10341         rval = proc.wait()
10342         if rval != os.EX_OK:
10343                 return rval
10344
10345         modified_eclasses = set(ec for ec in ec_names \
10346                 if os.path.join("eclass", ec + ".eclass") in modified_files)
10347
10348         updated_ec_mtimes = {}
10349
10350         for cpv in cache_db:
10351                 cpv_split = portage.catpkgsplit(cpv)
10352                 if cpv_split is None:
10353                         writemsg_level("!!! Invalid cache entry: %s\n" % (cpv,),
10354                                 level=logging.ERROR, noiselevel=-1)
10355                         continue
10356
10357                 cat, pn, ver, rev = cpv_split
10358                 cat, pf = portage.catsplit(cpv)
10359                 relative_eb_path = os.path.join(cat, pn, pf + ".ebuild")
10360                 if relative_eb_path in modified_files:
10361                         continue
10362
10363                 try:
10364                         cache_entry = cache_db[cpv]
10365                         eb_mtime = cache_entry.get("_mtime_")
10366                         ec_mtimes = cache_entry.get("_eclasses_")
10367                 except KeyError:
10368                         writemsg_level("!!! Missing cache entry: %s\n" % (cpv,),
10369                                 level=logging.ERROR, noiselevel=-1)
10370                         continue
10371                 except CacheError, e:
10372                         writemsg_level("!!! Unable to access cache entry: %s %s\n" % \
10373                                 (cpv, e), level=logging.ERROR, noiselevel=-1)
10374                         continue
10375
10376                 if eb_mtime is None:
10377                         writemsg_level("!!! Missing ebuild mtime: %s\n" % (cpv,),
10378                                 level=logging.ERROR, noiselevel=-1)
10379                         continue
10380
10381                 try:
10382                         eb_mtime = long(eb_mtime)
10383                 except ValueError:
10384                         writemsg_level("!!! Invalid ebuild mtime: %s %s\n" % \
10385                                 (cpv, eb_mtime), level=logging.ERROR, noiselevel=-1)
10386                         continue
10387
10388                 if ec_mtimes is None:
10389                         writemsg_level("!!! Missing eclass mtimes: %s\n" % (cpv,),
10390                                 level=logging.ERROR, noiselevel=-1)
10391                         continue
10392
10393                 if modified_eclasses.intersection(ec_mtimes):
10394                         continue
10395
10396                 missing_eclasses = set(ec_mtimes).difference(ec_names)
10397                 if missing_eclasses:
10398                         writemsg_level("!!! Non-existent eclass(es): %s %s\n" % \
10399                                 (cpv, sorted(missing_eclasses)), level=logging.ERROR,
10400                                 noiselevel=-1)
10401                         continue
10402
10403                 eb_path = os.path.join(portdir, relative_eb_path)
10404                 try:
10405                         current_eb_mtime = long(os.stat(eb_path).st_mtime)
10406                 except OSError:
10407                         writemsg_level("!!! Missing ebuild: %s\n" % \
10408                                 (cpv,), level=logging.ERROR, noiselevel=-1)
10409                         continue
10410
10411                 inconsistent = False
10412                 for ec, (ec_path, ec_mtime) in ec_mtimes.iteritems():
10413                         updated_mtime = updated_ec_mtimes.get(ec)
10414                         if updated_mtime is not None and updated_mtime != ec_mtime:
10415                                 writemsg_level("!!! Inconsistent eclass mtime: %s %s\n" % \
10416                                         (cpv, ec), level=logging.ERROR, noiselevel=-1)
10417                                 inconsistent = True
10418                                 break
10419
10420                 if inconsistent:
10421                         continue
10422
10423                 if current_eb_mtime != eb_mtime:
10424                         os.utime(eb_path, (eb_mtime, eb_mtime))
10425
10426                 for ec, (ec_path, ec_mtime) in ec_mtimes.iteritems():
10427                         if ec in updated_ec_mtimes:
10428                                 continue
10429                         ec_path = os.path.join(ec_dir, ec + ".eclass")
10430                         current_mtime = long(os.stat(ec_path).st_mtime)
10431                         if current_mtime != ec_mtime:
10432                                 os.utime(ec_path, (ec_mtime, ec_mtime))
10433                         updated_ec_mtimes[ec] = ec_mtime
10434
10435         return os.EX_OK
10436
10437 def action_metadata(settings, portdb, myopts, porttrees=None):
10438         if porttrees is None:
10439                 porttrees = portdb.porttrees
10440         portage.writemsg_stdout("\n>>> Updating Portage cache\n")
10441         old_umask = os.umask(0002)
10442         cachedir = os.path.normpath(settings.depcachedir)
10443         if cachedir in ["/",    "/bin", "/dev",  "/etc",  "/home",
10444                                         "/lib", "/opt", "/proc", "/root", "/sbin",
10445                                         "/sys", "/tmp", "/usr",  "/var"]:
10446                 print >> sys.stderr, "!!! PORTAGE_DEPCACHEDIR IS SET TO A PRIMARY " + \
10447                         "ROOT DIRECTORY ON YOUR SYSTEM."
10448                 print >> sys.stderr, \
10449                         "!!! This is ALMOST CERTAINLY NOT what you want: '%s'" % cachedir
10450                 sys.exit(73)
10451         if not os.path.exists(cachedir):
10452                 os.makedirs(cachedir)
10453
10454         auxdbkeys = [x for x in portage.auxdbkeys if not x.startswith("UNUSED_0")]
10455         auxdbkeys = tuple(auxdbkeys)
10456
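              # Per-tree bookkeeping: the pregenerated source cache, the destination
              # cache, the eclass database, and the set of valid cpvs seen below.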
10457         class TreeData(object):
10458                 __slots__ = ('dest_db', 'eclass_db', 'path', 'src_db', 'valid_nodes')
10459                 def __init__(self, dest_db, eclass_db, path, src_db):
10460                         self.dest_db = dest_db
10461                         self.eclass_db = eclass_db
10462                         self.path = path
10463                         self.src_db = src_db
10464                         self.valid_nodes = set()
10465
10466         porttrees_data = []
10467         for path in porttrees:
10468                 src_db = portdb._pregen_auxdb.get(path)
10469                 if src_db is None and \
10470                         os.path.isdir(os.path.join(path, 'metadata', 'cache')):
10471                         src_db = portdb.metadbmodule(
10472                                 path, 'metadata/cache', auxdbkeys, readonly=True)
10473                         try:
10474                                 src_db.ec = portdb._repo_info[path].eclass_db
10475                         except AttributeError:
10476                                 pass
10477
10478                 if src_db is not None:
10479                         porttrees_data.append(TreeData(portdb.auxdb[path],
10480                                 portdb._repo_info[path].eclass_db, path, src_db))
10481
10482         porttrees = [tree_data.path for tree_data in porttrees_data]
10483
10484         isatty = sys.stdout.isatty()
10485         quiet = not isatty or '--quiet' in myopts
10486         onProgress = None
10487         if not quiet:
10488                 progressBar = portage.output.TermProgressBar()
10489                 progressHandler = ProgressHandler()
10490                 onProgress = progressHandler.onProgress
10491                 def display():
10492                         progressBar.set(progressHandler.curval, progressHandler.maxval)
10493                 progressHandler.display = display
10494                 def sigwinch_handler(signum, frame):
10495                         lines, progressBar.term_columns = \
10496                                 portage.output.get_term_size()
10497                 signal.signal(signal.SIGWINCH, sigwinch_handler)
10498
10499         # Temporarily override portdb.porttrees so portdb.cp_all()
10500         # will only return the relevant subset.
10501         portdb_porttrees = portdb.porttrees
10502         portdb.porttrees = porttrees
10503         try:
10504                 cp_all = portdb.cp_all()
10505         finally:
10506                 portdb.porttrees = portdb_porttrees
10507
10508         curval = 0
10509         maxval = len(cp_all)
10510         if onProgress is not None:
10511                 onProgress(maxval, curval)
10512
10513         from portage.cache.util import quiet_mirroring
10514         from portage import eapi_is_supported, \
10515                 _validate_cache_for_unsupported_eapis
10516
10517         # TODO: Display error messages, but do not interfere with the progress bar.
10518         # Here's how:
10519         #  1) erase the progress bar
10520         #  2) show the error message
10521         #  3) redraw the progress bar on a new line
10522         noise = quiet_mirroring()
10523
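              # Walk every category/package in the selected trees and copy valid
              # metadata from the pregenerated cache into the destination cache.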
10524         for cp in cp_all:
10525                 for tree_data in porttrees_data:
10526                         for cpv in portdb.cp_list(cp, mytree=tree_data.path):
10527                                 tree_data.valid_nodes.add(cpv)
10528                                 try:
10529                                         src = tree_data.src_db[cpv]
10530                                 except KeyError, e:
10531                                         noise.missing_entry(cpv)
10532                                         del e
10533                                         continue
10534                                 except CacheError, ce:
10535                                         noise.exception(cpv, ce)
10536                                         del ce
10537                                         continue
10538
10539                                 eapi = src.get('EAPI')
10540                                 if not eapi:
10541                                         eapi = '0'
10542                                 eapi = eapi.lstrip('-')
10543                                 eapi_supported = eapi_is_supported(eapi)
10544                                 if not eapi_supported:
10545                                         if not _validate_cache_for_unsupported_eapis:
10546                                                 noise.misc(cpv, "unable to validate " + \
10547                                                         "cache for EAPI='%s'" % eapi)
10548                                                 continue
10549
10550                                 dest = None
10551                                 try:
10552                                         dest = tree_data.dest_db[cpv]
10553                                 except (KeyError, CacheError):
10554                                         pass
10555
10556                                 for d in (src, dest):
10557                                         if d is not None and d.get('EAPI') in ('', '0'):
10558                                                 del d['EAPI']
10559
10560                                 if dest is not None:
10561                                         if not (dest['_mtime_'] == src['_mtime_'] and \
10562                                                 tree_data.eclass_db.is_eclass_data_valid(
10563                                                         dest['_eclasses_']) and \
10564                                                 set(dest['_eclasses_']) == set(src['_eclasses_'])):
10565                                                 dest = None
10566                                         else:
10567                                                 # We don't want to skip the write unless we're really
10568                                                 # sure that the existing cache is identical, so don't
10569                                                 # trust _mtime_ and _eclasses_ alone.
10570                                                 for k in set(chain(src, dest)).difference(
10571                                                         ('_mtime_', '_eclasses_')):
10572                                                         if dest.get(k, '') != src.get(k, ''):
10573                                                                 dest = None
10574                                                                 break
10575
10576                                 if dest is not None:
10577                                         # The existing data is valid and identical,
10578                                         # so there's no need to overwrite it.
10579                                         continue
10580
10581                                 try:
10582                                         inherited = src.get('INHERITED', '')
10583                                         eclasses = src.get('_eclasses_')
10584                                 except CacheError, ce:
10585                                         noise.exception(cpv, ce)
10586                                         del ce
10587                                         continue
10588
10589                                 if eclasses is not None:
10590                                         if not tree_data.eclass_db.is_eclass_data_valid(
10591                                                 src['_eclasses_']):
10592                                                 noise.eclass_stale(cpv)
10593                                                 continue
10594                                         inherited = eclasses
10595                                 else:
10596                                         inherited = inherited.split()
10597
10598                                 if tree_data.src_db.complete_eclass_entries and \
10599                                         eclasses is None:
10600                                         noise.corruption(cpv, "missing _eclasses_ field")
10601                                         continue
10602
10603                                 if inherited:
10604                                         # Even if _eclasses_ already exists, replace it with data from
10605                                         # eclass_cache, in order to insert local eclass paths.
10606                                         try:
10607                                                 eclasses = tree_data.eclass_db.get_eclass_data(inherited)
10608                                         except KeyError:
10609                                                 # INHERITED contains a non-existent eclass.
10610                                                 noise.eclass_stale(cpv)
10611                                                 continue
10612
10613                                         if eclasses is None:
10614                                                 noise.eclass_stale(cpv)
10615                                                 continue
10616                                         src['_eclasses_'] = eclasses
10617                                 else:
10618                                         src['_eclasses_'] = {}
10619
10620                                 if not eapi_supported:
10621                                         src = {
10622                                                 'EAPI'       : '-' + eapi,
10623                                                 '_mtime_'    : src['_mtime_'],
10624                                                 '_eclasses_' : src['_eclasses_'],
10625                                         }
10626
10627                                 try:
10628                                         tree_data.dest_db[cpv] = src
10629                                 except CacheError, ce:
10630                                         noise.exception(cpv, ce)
10631                                         del ce
10632
10633                 curval += 1
10634                 if onProgress is not None:
10635                         onProgress(maxval, curval)
10636
10637         if onProgress is not None:
10638                 onProgress(maxval, curval)
10639
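              # Prune cache entries for packages that no longer exist in the tree.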
10640         for tree_data in porttrees_data:
10641                 try:
10642                         dead_nodes = set(tree_data.dest_db.iterkeys())
10643                 except CacheError, e:
10644                         writemsg_level("Error listing cache entries for " + \
10645                                 "'%s': %s, continuing...\n" % (tree_data.path, e),
10646                                 level=logging.ERROR, noiselevel=-1)
10647                         del e
10648                 else:
10649                         dead_nodes.difference_update(tree_data.valid_nodes)
10650                         for cpv in dead_nodes:
10651                                 try:
10652                                         del tree_data.dest_db[cpv]
10653                                 except (KeyError, CacheError):
10654                                         pass
10655
10656         if not quiet:
10657                 # make sure the final progress is displayed
10658                 progressHandler.display()
10659                 print
10660                 signal.signal(signal.SIGWINCH, signal.SIG_DFL)
10661
10662         sys.stdout.flush()
10663         os.umask(old_umask)
10664
10665 def action_regen(settings, portdb, max_jobs, max_load):
10666         xterm_titles = "notitles" not in settings.features
10667         emergelog(xterm_titles, " === regen")
10668         #regenerate cache entries
10669         portage.writemsg_stdout("Regenerating cache entries...\n")
10670         try:
10671                 os.close(sys.stdin.fileno())
10672         except SystemExit, e:
10673                 raise # Needed else can't exit
10674         except:
10675                 pass
10676         sys.stdout.flush()
10677
10678         regen = MetadataRegen(portdb, max_jobs=max_jobs, max_load=max_load)
10679         regen.run()
10680
10681         portage.writemsg_stdout("done!\n")
10682         return regen.returncode
10683
10684 def action_config(settings, trees, myopts, myfiles):
10685         if len(myfiles) != 1:
10686                 print red("!!! config can only take a single package atom at this time\n")
10687                 sys.exit(1)
10688         if not is_valid_package_atom(myfiles[0]):
10689                 portage.writemsg("!!! '%s' is not a valid package atom.\n" % myfiles[0],
10690                         noiselevel=-1)
10691                 portage.writemsg("!!! Please check ebuild(5) for full details.\n")
10692                 portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
10693                 sys.exit(1)
10694         print
10695         try:
10696                 pkgs = trees[settings["ROOT"]]["vartree"].dbapi.match(myfiles[0])
10697         except portage.exception.AmbiguousPackageName, e:
10698                 # Multiple matches thrown from cpv_expand
10699                 pkgs = e.args[0]
10700         if len(pkgs) == 0:
10701                 print "No packages found.\n"
10702                 sys.exit(0)
10703         elif len(pkgs) > 1:
10704                 if "--ask" in myopts:
10705                         options = []
10706                         print "Please select a package to configure:"
10707                         idx = 0
10708                         for pkg in pkgs:
10709                                 idx += 1
10710                                 options.append(str(idx))
10711                                 print options[-1]+") "+pkg
10712                         print "X) Cancel"
10713                         options.append("X")
10714                         idx = userquery("Selection?", options)
10715                         if idx == "X":
10716                                 sys.exit(0)
10717                         pkg = pkgs[int(idx)-1]
10718                 else:
10719                         print "The following packages are available:"
10720                         for pkg in pkgs:
10721                                 print "* "+pkg
10722                         print "\nPlease use a specific atom or the --ask option."
10723                         sys.exit(1)
10724         else:
10725                 pkg = pkgs[0]
10726
10727         print
10728         if "--ask" in myopts:
10729                 if userquery("Ready to configure "+pkg+"?") == "No":
10730                         sys.exit(0)
10731         else:
10732                 print "Configuring %s..." % pkg
10733         print
10734         ebuildpath = trees[settings["ROOT"]]["vartree"].dbapi.findname(pkg)
10735         mysettings = portage.config(clone=settings)
10736         vardb = trees[mysettings["ROOT"]]["vartree"].dbapi
10737         debug = mysettings.get("PORTAGE_DEBUG") == "1"
10738         retval = portage.doebuild(ebuildpath, "config", mysettings["ROOT"],
10739                 mysettings,
10740                 debug=debug, cleanup=True,
10741                 mydbapi=trees[settings["ROOT"]]["vartree"].dbapi, tree="vartree")
10742         if retval == os.EX_OK:
10743                 portage.doebuild(ebuildpath, "clean", mysettings["ROOT"],
10744                         mysettings, debug=debug, mydbapi=vardb, tree="vartree")
10745         print
10746
10747 def action_info(settings, trees, myopts, myfiles):
10748         print getportageversion(settings["PORTDIR"], settings["ROOT"],
10749                 settings.profile_path, settings["CHOST"],
10750                 trees[settings["ROOT"]]["vartree"].dbapi)
10751         header_width = 65
10752         header_title = "System Settings"
10753         if myfiles:
10754                 print header_width * "="
10755                 print header_title.rjust(int(header_width/2 + len(header_title)/2))
10756         print header_width * "="
10757         print "System uname: "+platform.platform(aliased=1)
10758
10759         lastSync = portage.grabfile(os.path.join(
10760                 settings["PORTDIR"], "metadata", "timestamp.chk"))
10761         print "Timestamp of tree:",
10762         if lastSync:
10763                 print lastSync[0]
10764         else:
10765                 print "Unknown"
10766
10767         output=commands.getstatusoutput("distcc --version")
10768         if not output[0]:
10769                 print str(output[1].split("\n",1)[0]),
10770                 if "distcc" in settings.features:
10771                         print "[enabled]"
10772                 else:
10773                         print "[disabled]"
10774
10775         output=commands.getstatusoutput("ccache -V")
10776         if not output[0]:
10777                 print str(output[1].split("\n",1)[0]),
10778                 if "ccache" in settings.features:
10779                         print "[enabled]"
10780                 else:
10781                         print "[disabled]"
10782
10783         myvars  = ["sys-devel/autoconf", "sys-devel/automake", "virtual/os-headers",
10784                    "sys-devel/binutils", "sys-devel/libtool",  "dev-lang/python"]
10785         myvars += portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_pkgs")
10786         myvars  = portage.util.unique_array(myvars)
10787         myvars.sort()
10788
10789         for x in myvars:
10790                 if portage.isvalidatom(x):
10791                         pkg_matches = trees["/"]["vartree"].dbapi.match(x)
10792                         pkg_matches = [portage.catpkgsplit(cpv)[1:] for cpv in pkg_matches]
10793                         pkg_matches.sort(key=cmp_sort_key(portage.pkgcmp))
10794                         pkgs = []
10795                         for pn, ver, rev in pkg_matches:
10796                                 if rev != "r0":
10797                                         pkgs.append(ver + "-" + rev)
10798                                 else:
10799                                         pkgs.append(ver)
10800                         if pkgs:
10801                                 pkgs = ", ".join(pkgs)
10802                                 print "%-20s %s" % (x+":", pkgs)
10803                 else:
10804                         print "%-20s %s" % (x+":", "[NOT VALID]")
10805
10806         libtool_vers = ",".join(trees["/"]["vartree"].dbapi.match("sys-devel/libtool"))
10807
10808         if "--verbose" in myopts:
10809                 myvars=settings.keys()
10810         else:
10811                 myvars = ['GENTOO_MIRRORS', 'CONFIG_PROTECT', 'CONFIG_PROTECT_MASK',
10812                           'PORTDIR', 'DISTDIR', 'PKGDIR', 'PORTAGE_TMPDIR',
10813                           'PORTDIR_OVERLAY', 'USE', 'CHOST', 'CFLAGS', 'CXXFLAGS',
10814                           'ACCEPT_KEYWORDS', 'SYNC', 'FEATURES', 'EMERGE_DEFAULT_OPTS']
10815
10816                 myvars.extend(portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_vars"))
10817
10818         myvars = portage.util.unique_array(myvars)
10819         use_expand = settings.get('USE_EXPAND', '').split()
10820         use_expand.sort()
10821         use_expand_hidden = set(
10822                 settings.get('USE_EXPAND_HIDDEN', '').upper().split())
10823         alphabetical_use = '--alphabetical' in myopts
10824         root_config = trees[settings["ROOT"]]['root_config']
10825         unset_vars = []
10826         myvars.sort()
10827         for x in myvars:
10828                 if x in settings:
10829                         if x != "USE":
10830                                 print '%s="%s"' % (x, settings[x])
10831                         else:
10832                                 use = set(settings["USE"].split())
10833                                 for varname in use_expand:
10834                                         flag_prefix = varname.lower() + "_"
10835                                         for f in list(use):
10836                                                 if f.startswith(flag_prefix):
10837                                                         use.remove(f)
10838                                 use = list(use)
10839                                 use.sort()
10840                                 print 'USE="%s"' % " ".join(use),
10841                                 for varname in use_expand:
10842                                         myval = settings.get(varname)
10843                                         if myval:
10844                                                 print '%s="%s"' % (varname, myval),
10845                                 print
10846                 else:
10847                         unset_vars.append(x)
10848         if unset_vars:
10849                 print "Unset:  "+", ".join(unset_vars)
10850         print
10851
10852         if "--debug" in myopts:
10853                 for x in dir(portage):
10854                         module = getattr(portage, x)
10855                         if "cvs_id_string" in dir(module):
10856                                 print "%s: %s" % (str(x), str(module.cvs_id_string))
10857
10858         # See if we can find any packages installed matching the strings
10859         # passed on the command line
10860         mypkgs = []
10861         vardb = trees[settings["ROOT"]]["vartree"].dbapi
10862         portdb = trees[settings["ROOT"]]["porttree"].dbapi
10863         for x in myfiles:
10864                 mypkgs.extend(vardb.match(x))
10865
10866         # If some packages were found...
10867         if mypkgs:
10868                 # Get our global settings (we only print stuff if it varies from
10869                 # the current config)
10870                 mydesiredvars = [ 'CHOST', 'CFLAGS', 'CXXFLAGS', 'LDFLAGS' ]
10871                 auxkeys = mydesiredvars + list(vardb._aux_cache_keys)
10872                 auxkeys.append('DEFINED_PHASES')
10873                 global_vals = {}
10874                 pkgsettings = portage.config(clone=settings)
10875
10876                 # Loop through each package
10877                 # Only print settings if they differ from global settings
10878                 header_title = "Package Settings"
10879                 print header_width * "="
10880                 print header_title.rjust(int(header_width/2 + len(header_title)/2))
10881                 print header_width * "="
10882                 from portage.output import EOutput
10883                 out = EOutput()
10884                 for cpv in mypkgs:
10885                         # Get all package specific variables
10886                         metadata = dict(izip(auxkeys, vardb.aux_get(cpv, auxkeys)))
10887                         pkg = Package(built=True, cpv=cpv,
10888                                 installed=True, metadata=izip(Package.metadata_keys,
10889                                 (metadata.get(x, '') for x in Package.metadata_keys)),
10890                                 root_config=root_config, type_name='installed')
10891
10892                         print "\n%s was built with the following:" % \
10893                                 colorize("INFORM", str(pkg.cpv))
10894
10895                         pkgsettings.setcpv(pkg)
10896                         forced_flags = set(chain(pkgsettings.useforce,
10897                                 pkgsettings.usemask))
10898                         use = set(pkg.use.enabled)
10899                         use.discard(pkgsettings.get('ARCH'))
10900                         use_expand_flags = set()
10901                         use_enabled = {}
10902                         use_disabled = {}
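                              # Partition the package's USE flags into USE_EXPAND groups so
                              # that each expanded variable is printed below as its own
                              # VAR="..." assignment, separate from the plain USE flags.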
10903                         for varname in use_expand:
10904                                 flag_prefix = varname.lower() + "_"
10905                                 for f in use:
10906                                         if f.startswith(flag_prefix):
10907                                                 use_expand_flags.add(f)
10908                                                 use_enabled.setdefault(
10909                                                         varname.upper(), []).append(f[len(flag_prefix):])
10910
10911                                 for f in pkg.iuse.all:
10912                                         if f.startswith(flag_prefix):
10913                                                 use_expand_flags.add(f)
10914                                                 if f not in use:
10915                                                         use_disabled.setdefault(
10916                                                                 varname.upper(), []).append(f[len(flag_prefix):])
10917
10918                         var_order = set(use_enabled)
10919                         var_order.update(use_disabled)
10920                         var_order = sorted(var_order)
10921                         var_order.insert(0, 'USE')
10922                         use.difference_update(use_expand_flags)
10923                         use_enabled['USE'] = list(use)
10924                         use_disabled['USE'] = []
10925
10926                         for f in pkg.iuse.all:
10927                                 if f not in use and \
10928                                         f not in use_expand_flags:
10929                                         use_disabled['USE'].append(f)
10930
10931                         for varname in var_order:
10932                                 if varname in use_expand_hidden:
10933                                         continue
10934                                 flags = []
10935                                 for f in use_enabled.get(varname, []):
10936                                         flags.append(UseFlagDisplay(f, True, f in forced_flags))
10937                                 for f in use_disabled.get(varname, []):
10938                                         flags.append(UseFlagDisplay(f, False, f in forced_flags))
10939                                 if alphabetical_use:
10940                                         flags.sort(key=UseFlagDisplay.sort_combined)
10941                                 else:
10942                                         flags.sort(key=UseFlagDisplay.sort_separated)
10943                                 print '%s="%s"' % (varname, ' '.join(str(f) for f in flags)),
10944                         print
10945
10946                         for myvar in mydesiredvars:
10947                                 if metadata[myvar].split() != settings.get(myvar, '').split():
10948                                         print "%s=\"%s\"" % (myvar, metadata[myvar])
10949                         print
10950
10951                         if metadata['DEFINED_PHASES']:
10952                                 if 'info' not in metadata['DEFINED_PHASES'].split():
10953                                         continue
10954
10955                         print ">>> Attempting to run pkg_info() for '%s'" % pkg.cpv
10956                         ebuildpath = vardb.findname(pkg.cpv)
10957                         if not ebuildpath or not os.path.exists(ebuildpath):
10958                                 out.ewarn("No ebuild found for '%s'" % pkg.cpv)
10959                                 continue
10960                         portage.doebuild(ebuildpath, "info", pkgsettings["ROOT"],
10961                 pkgsettings, debug=(settings.get("PORTAGE_DEBUG", "") == "1"),
10962                                 mydbapi=trees[settings["ROOT"]]["vartree"].dbapi,
10963                                 tree="vartree")
10964
10965 def action_search(root_config, myopts, myfiles, spinner):
10966         if not myfiles:
10967                 print "emerge: no search terms provided."
10968         else:
10969                 searchinstance = search(root_config,
10970                         spinner, "--searchdesc" in myopts,
10971                         "--quiet" not in myopts, "--usepkg" in myopts,
10972                         "--usepkgonly" in myopts)
10973                 for mysearch in myfiles:
10974                         try:
10975                                 searchinstance.execute(mysearch)
10976                         except re.error, comment:
10977                                 print "\n!!! Regular expression error in \"%s\": %s" % ( mysearch, comment )
10978                                 sys.exit(1)
10979                         searchinstance.output()
10980
10981 def action_uninstall(settings, trees, ldpath_mtimes,
10982         opts, action, files, spinner):
10983
10984         # For backward compat, some actions do not require leading '='.
10985         ignore_missing_eq = action in ('clean', 'unmerge')
10986         root = settings['ROOT']
10987         vardb = trees[root]['vartree'].dbapi
10988         valid_atoms = []
10989         lookup_owners = []
10990
10991         # Ensure atoms are valid before calling unmerge().
10992         # For backward compat, leading '=' is not required.
10993         for x in files:
10994                 if is_valid_package_atom(x) or \
10995                         (ignore_missing_eq and is_valid_package_atom('=' + x)):
10996
10997                         try:
10998                                 valid_atoms.append(
10999                                         portage.dep_expand(x, mydb=vardb, settings=settings))
11000                         except portage.exception.AmbiguousPackageName, e:
11001                                 msg = "The short ebuild name \"" + x + \
11002                                         "\" is ambiguous.  Please specify " + \
11003                                         "one of the following " + \
11004                                         "fully-qualified ebuild names instead:"
11005                                 for line in textwrap.wrap(msg, 70):
11006                                         writemsg_level("!!! %s\n" % (line,),
11007                                                 level=logging.ERROR, noiselevel=-1)
11008                                 for i in e[0]:
11009                                         writemsg_level("    %s\n" % colorize("INFORM", i),
11010                                                 level=logging.ERROR, noiselevel=-1)
11011                                 writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
11012                                 return 1
11013
11014                 elif x.startswith(os.sep):
11015                         if not x.startswith(root):
11016                                 writemsg_level(("!!! '%s' does not start with" + \
11017                                         " $ROOT.\n") % x, level=logging.ERROR, noiselevel=-1)
11018                                 return 1
11019                         # Queue these up since it's most efficient to handle
11020                         # multiple files in a single iter_owners() call.
11021                         lookup_owners.append(x)
11022
11023                 else:
11024                         msg = []
11025                         msg.append("'%s' is not a valid package atom." % (x,))
11026                         msg.append("Please check ebuild(5) for full details.")
11027                         writemsg_level("".join("!!! %s\n" % line for line in msg),
11028                                 level=logging.ERROR, noiselevel=-1)
11029                         return 1
11030
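              # Arguments given as filesystem paths are resolved to the packages that
              # own them via the vardb owners index; each owning package contributes a
              # ${CATEGORY}/${PN}:SLOT atom (or just ${CATEGORY}/${PN} when SLOT is
              # unset) to valid_atoms below.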
11031         if lookup_owners:
11032                 relative_paths = []
11033                 search_for_multiple = False
11034                 if len(lookup_owners) > 1:
11035                         search_for_multiple = True
11036
11037                 for x in lookup_owners:
11038                         if not search_for_multiple and os.path.isdir(x):
11039                                 search_for_multiple = True
11040                         relative_paths.append(x[len(root):])
11041
11042                 owners = set()
11043                 for pkg, relative_path in \
11044                         vardb._owners.iter_owners(relative_paths):
11045                         owners.add(pkg.mycpv)
11046                         if not search_for_multiple:
11047                                 break
11048
11049                 if owners:
11050                         for cpv in owners:
11051                                 slot = vardb.aux_get(cpv, ['SLOT'])[0]
11052                                 if not slot:
11053                                         # portage now masks packages with missing slot, but it's
11054                                         # possible that one was installed by an older version
11055                                         atom = portage.cpv_getkey(cpv)
11056                                 else:
11057                                         atom = '%s:%s' % (portage.cpv_getkey(cpv), slot)
11058                                 valid_atoms.append(portage.dep.Atom(atom))
11059                 else:
11060                         writemsg_level(("!!! '%s' is not claimed " + \
11061                                 "by any package.\n") % lookup_owners[0],
11062                                 level=logging.WARNING, noiselevel=-1)
11063
11064         if files and not valid_atoms:
11065                 return 1
11066
11067         if action in ('clean', 'unmerge') or \
11068                 (action == 'prune' and "--nodeps" in opts):
11069                 # When given a list of atoms, unmerge them in the order given.
11070                 ordered = action == 'unmerge'
11071                 unmerge(trees[settings["ROOT"]]['root_config'], opts, action,
11072                         valid_atoms, ldpath_mtimes, ordered=ordered)
11073                 rval = os.EX_OK
11074         elif action == 'deselect':
11075                 rval = action_deselect(settings, trees, opts, valid_atoms)
11076         else:
11077                 rval = action_depclean(settings, trees, ldpath_mtimes,
11078                         opts, action, valid_atoms, spinner)
11079
11080         return rval
11081
11082 def action_deselect(settings, trees, opts, atoms):
11083         root_config = trees[settings['ROOT']]['root_config']
11084         world_set = root_config.sets['world']
11085         if not hasattr(world_set, 'update'):
11086                 writemsg_level("World set does not appear to be mutable.\n",
11087                         level=logging.ERROR, noiselevel=-1)
11088                 return 1
11089
11090         vardb = root_config.trees['vartree'].dbapi
11091         expanded_atoms = set(atoms)
11092         from portage.dep import Atom
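              # Expand each argument atom into ${CATEGORY}/${PN}:SLOT atoms for every
              # installed match, so that world entries recorded with an explicit slot
              # are also considered for removal.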
11093         for atom in atoms:
11094                 for cpv in vardb.match(atom):
11095                         slot, = vardb.aux_get(cpv, ['SLOT'])
11096                         if not slot:
11097                                 slot = '0'
11098                         expanded_atoms.add(Atom('%s:%s' % (portage.cpv_getkey(cpv), slot)))
11099
11100         pretend = '--pretend' in opts
11101         locked = False
11102         if not pretend and hasattr(world_set, 'lock'):
11103                 world_set.lock()
11104                 locked = True
11105         try:
11106                 discard_atoms = set()
11107                 world_set.load()
11108                 for atom in world_set:
11109                         if not isinstance(atom, Atom):
11110                                 # nested set
11111                                 continue
11112                         for arg_atom in expanded_atoms:
11113                                 if arg_atom.intersects(atom) and \
11114                                         not (arg_atom.slot and not atom.slot):
11115                                         discard_atoms.add(atom)
11116                                         break
11117                 if discard_atoms:
11118                         for atom in sorted(discard_atoms):
11119                                 print ">>> Removing %s from \"world\" favorites file..." % \
11120                                         colorize("INFORM", str(atom))
11121
11122                         if '--ask' in opts:
11123                                 prompt = "Would you like to remove these " + \
11124                                         "packages from your world favorites?"
11125                                 if userquery(prompt) == 'No':
11126                                         return os.EX_OK
11127
11128                         remaining = set(world_set)
11129                         remaining.difference_update(discard_atoms)
11130                         if not pretend:
11131                                 world_set.replace(remaining)
11132                 else:
11133                         print ">>> No matching atoms found in \"world\" favorites file..."
11134         finally:
11135                 if locked:
11136                         world_set.unlock()
11137         return os.EX_OK
11138
11139 def action_depclean(settings, trees, ldpath_mtimes,
11140         myopts, action, myfiles, spinner):
11141         # Remove packages that are neither explicitly merged nor required as a
11142         # dependency of another package. The world file defines what is explicit.
11143
11144         # Global depclean or prune operations are not very safe when there are
11145         # missing dependencies since it's unknown how badly incomplete
11146         # the dependency graph is, and we might accidentally remove packages
11147         # that should have been pulled into the graph. On the other hand, it's
11148         # relatively safe to ignore missing deps when only asked to remove
11149         # specific packages.
11150         allow_missing_deps = len(myfiles) > 0
11151
11152         msg = []
11153         msg.append("Always study the list of packages to be cleaned for any obvious\n")
11154         msg.append("mistakes. Packages that are part of the world set will always\n")
11155         msg.append("be kept.  They can be manually added to this set with\n")
11156         msg.append(good("`emerge --noreplace <atom>`") + ".  Packages that are listed in\n")
11157         msg.append("package.provided (see portage(5)) will be removed by\n")
11158         msg.append("depclean, even if they are part of the world set.\n")
11159         msg.append("\n")
11160         msg.append("As a safety measure, depclean will not remove any packages\n")
11161         msg.append("unless *all* required dependencies have been resolved.  As a\n")
11162         msg.append("consequence, it is often necessary to run %s\n" % \
11163                 good("`emerge --update"))
11164         msg.append(good("--newuse --deep @system @world`") + \
11165                 " prior to depclean.\n")
11166
11167         if action == "depclean" and "--quiet" not in myopts and not myfiles:
11168                 portage.writemsg_stdout("\n")
11169                 for x in msg:
11170                         portage.writemsg_stdout(colorize("WARN", " * ") + x)
11171
11172         xterm_titles = "notitles" not in settings.features
11173         myroot = settings["ROOT"]
11174         root_config = trees[myroot]["root_config"]
11175         getSetAtoms = root_config.setconfig.getSetAtoms
11176         vardb = trees[myroot]["vartree"].dbapi
11177         deselect = myopts.get('--deselect') != 'n'
11178
11179         required_set_names = ("system", "world")
11180         required_sets = {}
11181         set_args = []
11182
11183         for s in required_set_names:
11184                 required_sets[s] = InternalPackageSet(
11185                         initial_atoms=getSetAtoms(s))
11186
11187
11188         # When removing packages, use a temporary version of world
11189         # which excludes packages that are intended to be eligible for
11190         # removal.
11191         world_temp_set = required_sets["world"]
11192         system_set = required_sets["system"]
11193
11194         if not system_set or not world_temp_set:
11195
11196                 if not system_set:
11197                         writemsg_level("!!! You have no system list.\n",
11198                                 level=logging.ERROR, noiselevel=-1)
11199
11200                 if not world_temp_set:
11201                         writemsg_level("!!! You have no world file.\n",
11202                                         level=logging.WARNING, noiselevel=-1)
11203
11204                 writemsg_level("!!! Proceeding is likely to " + \
11205                         "break your installation.\n",
11206                         level=logging.WARNING, noiselevel=-1)
11207                 if "--pretend" not in myopts:
11208                         countdown(int(settings["EMERGE_WARNING_DELAY"]), ">>> Depclean")
11209
11210         if action == "depclean":
11211                 emergelog(xterm_titles, " >>> depclean")
11212
11213         import textwrap
11214         args_set = InternalPackageSet()
11215         if myfiles:
11216                 args_set.update(myfiles)
11217                 matched_packages = False
11218                 for x in args_set:
11219                         if vardb.match(x):
11220                                 matched_packages = True
11221                                 break
11222                 if not matched_packages:
11223                         writemsg_level(">>> No packages selected for removal by %s\n" % \
11224                                 action)
11225                         return
11226
11227         writemsg_level("\nCalculating dependencies  ")
11228         resolver_params = create_depgraph_params(myopts, "remove")
11229         resolver = depgraph(settings, trees, myopts, resolver_params, spinner)
11230         vardb = resolver.trees[myroot]["vartree"].dbapi
11231
11232         if action == "depclean":
11233
11234                 if args_set:
11235
11236                         if deselect:
11237                                 world_temp_set.clear()
11238
11239                         # Pull in everything that's installed but not matched
11240                         # by an argument atom since we don't want to clean any
11241                         # package if something depends on it.
11242                         for pkg in vardb:
11243                                 spinner.update()
11244
11245                                 try:
11246                                         if args_set.findAtomForPackage(pkg) is None:
11247                                                 world_temp_set.add("=" + pkg.cpv)
11248                                                 continue
11249                                 except portage.exception.InvalidDependString, e:
11250                                         show_invalid_depstring_notice(pkg,
11251                                                 pkg.metadata["PROVIDE"], str(e))
11252                                         del e
11253                                         world_temp_set.add("=" + pkg.cpv)
11254                                         continue
11255
11256         elif action == "prune":
11257
11258                 if deselect:
11259                         world_temp_set.clear()
11260
11261                 # Pull in everything that's installed since we don't want
11262                 # to prune a package if something depends on it.
11263                 world_temp_set.update(vardb.cp_all())
11264
11265                 if not args_set:
11266
11267                         # Try to prune everything that's slotted.
11268                         for cp in vardb.cp_all():
11269                                 if len(vardb.cp_list(cp)) > 1:
11270                                         args_set.add(cp)
11271
11272                 # Remove atoms from world that match installed packages
11273                 # that are also matched by argument atoms, but do not remove
11274                 # them if they match the highest installed version.
11275                 for pkg in vardb:
11276                         spinner.update()
11277                         pkgs_for_cp = vardb.match_pkgs(pkg.cp)
11278                         if not pkgs_for_cp or pkg not in pkgs_for_cp:
11279                                 raise AssertionError("package expected in matches: " + \
11280                                         "cp = %s, cpv = %s matches = %s" % \
11281                                         (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
11282
11283                         highest_version = pkgs_for_cp[-1]
11284                         if pkg == highest_version:
11285                                 # pkg is the highest version
11286                                 world_temp_set.add("=" + pkg.cpv)
11287                                 continue
11288
11289                         if len(pkgs_for_cp) <= 1:
11290                                 raise AssertionError("more packages expected: " + \
11291                                         "cp = %s, cpv = %s matches = %s" % \
11292                                         (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
11293
11294                         try:
11295                                 if args_set.findAtomForPackage(pkg) is None:
11296                                         world_temp_set.add("=" + pkg.cpv)
11297                                         continue
11298                         except portage.exception.InvalidDependString, e:
11299                                 show_invalid_depstring_notice(pkg,
11300                                         pkg.metadata["PROVIDE"], str(e))
11301                                 del e
11302                                 world_temp_set.add("=" + pkg.cpv)
11303                                 continue
11304
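              # Feed the (possibly modified) system and world sets to the resolver as
              # set arguments and let _complete_graph() pull in their full dependency
              # tree. Any installed package that ends up outside of this graph is a
              # candidate for removal.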
11305         set_args = {}
11306         for s, package_set in required_sets.iteritems():
11307                 set_atom = SETPREFIX + s
11308                 set_arg = SetArg(arg=set_atom, set=package_set,
11309                         root_config=resolver.roots[myroot])
11310                 set_args[s] = set_arg
11311                 for atom in set_arg.set:
11312                         resolver._dep_stack.append(
11313                                 Dependency(atom=atom, root=myroot, parent=set_arg))
11314                         resolver.digraph.add(set_arg, None)
11315
11316         success = resolver._complete_graph()
11317         writemsg_level("\b\b... done!\n")
11318
11319         resolver.display_problems()
11320
11321         if not success:
11322                 return 1
11323
11324         def unresolved_deps():
11325
11326                 unresolvable = set()
11327                 for dep in resolver._initially_unsatisfied_deps:
11328                         if isinstance(dep.parent, Package) and \
11329                                 (dep.priority > UnmergeDepPriority.SOFT):
11330                                 unresolvable.add((dep.atom, dep.parent.cpv))
11331
11332                 if not unresolvable:
11333                         return False
11334
11335                 if unresolvable and not allow_missing_deps:
11336                         prefix = bad(" * ")
11337                         msg = []
11338                         msg.append("Dependencies could not be completely resolved due to")
11339                         msg.append("the following required packages not being installed:")
11340                         msg.append("")
11341                         for atom, parent in unresolvable:
11342                                 msg.append("  %s pulled in by:" % (atom,))
11343                                 msg.append("    %s" % (parent,))
11344                                 msg.append("")
11345                         msg.append("Have you forgotten to run " + \
11346                                 good("`emerge --update --newuse --deep @system @world`") + " prior")
11347                         msg.append(("to %s? It may be necessary to manually " + \
11348                                 "uninstall packages that no longer") % action)
11349                         msg.append("exist in the portage tree since " + \
11350                                 "it may not be possible to satisfy their")
11351                         msg.append("dependencies.  Also, be aware of " + \
11352                                 "the --with-bdeps option that is documented")
11353                         msg.append("in " + good("`man emerge`") + ".")
11354                         if action == "prune":
11355                                 msg.append("")
11356                                 msg.append("If you would like to ignore " + \
11357                                         "dependencies then use %s." % good("--nodeps"))
11358                         writemsg_level("".join("%s%s\n" % (prefix, line) for line in msg),
11359                                 level=logging.ERROR, noiselevel=-1)
11360                         return True
11361                 return False
11362
11363         if unresolved_deps():
11364                 return 1
11365
11366         graph = resolver.digraph.copy()
11367         required_pkgs_total = 0
11368         for node in graph:
11369                 if isinstance(node, Package):
11370                         required_pkgs_total += 1
11371
11372         def show_parents(child_node):
11373                 parent_nodes = graph.parent_nodes(child_node)
11374                 if not parent_nodes:
11375                         # With --prune, the highest version can be pulled in without any
11376                         # real parent since all installed packages are pulled in.  In that
11377                         # case there's nothing to show here.
11378                         return
11379                 parent_strs = []
11380                 for node in parent_nodes:
11381                         parent_strs.append(str(getattr(node, "cpv", node)))
11382                 parent_strs.sort()
11383                 msg = []
11384                 msg.append("  %s pulled in by:\n" % (child_node.cpv,))
11385                 for parent_str in parent_strs:
11386                         msg.append("    %s\n" % (parent_str,))
11387                 msg.append("\n")
11388                 portage.writemsg_stdout("".join(msg), noiselevel=-1)
11389
11390         def cmp_pkg_cpv(pkg1, pkg2):
11391                 """Sort Package instances by cpv."""
11392                 if pkg1.cpv > pkg2.cpv:
11393                         return 1
11394                 elif pkg1.cpv == pkg2.cpv:
11395                         return 0
11396                 else:
11397                         return -1
11398
11399         def create_cleanlist():
11400                 pkgs_to_remove = []
11401
11402                 if action == "depclean":
11403                         if args_set:
11404
11405                                 for pkg in sorted(vardb, key=cmp_sort_key(cmp_pkg_cpv)):
11406                                         arg_atom = None
11407                                         try:
11408                                                 arg_atom = args_set.findAtomForPackage(pkg)
11409                                         except portage.exception.InvalidDependString:
11410                                                 # this error has already been displayed by now
11411                                                 continue
11412
11413                                         if arg_atom:
11414                                                 if pkg not in graph:
11415                                                         pkgs_to_remove.append(pkg)
11416                                                 elif "--verbose" in myopts:
11417                                                         show_parents(pkg)
11418
11419                         else:
11420                                 for pkg in sorted(vardb, key=cmp_sort_key(cmp_pkg_cpv)):
11421                                         if pkg not in graph:
11422                                                 pkgs_to_remove.append(pkg)
11423                                         elif "--verbose" in myopts:
11424                                                 show_parents(pkg)
11425
11426                 elif action == "prune":
11427                         # Prune really uses all installed instead of world. It's not
11428                         # a real reverse dependency so don't display it as such.
11429                         graph.remove(set_args["world"])
11430
11431                         for atom in args_set:
11432                                 for pkg in vardb.match_pkgs(atom):
11433                                         if pkg not in graph:
11434                                                 pkgs_to_remove.append(pkg)
11435                                         elif "--verbose" in myopts:
11436                                                 show_parents(pkg)
11437
11438                 if not pkgs_to_remove:
11439                         writemsg_level(
11440                                 ">>> No packages selected for removal by %s\n" % action)
11441                         if "--verbose" not in myopts:
11442                                 writemsg_level(
11443                                         ">>> To see reverse dependencies, use %s\n" % \
11444                                                 good("--verbose"))
11445                         if action == "prune":
11446                                 writemsg_level(
11447                                         ">>> To ignore dependencies, use %s\n" % \
11448                                                 good("--nodeps"))
11449
11450                 return pkgs_to_remove
11451
11452         cleanlist = create_cleanlist()
11453
11454         if len(cleanlist):
11455                 clean_set = set(cleanlist)
11456
11457                 # Check if any of these package are the sole providers of libraries
11458                 # with consumers that have not been selected for removal. If so, these
11459                 # packages and any dependencies need to be added to the graph.
11460                 real_vardb = trees[myroot]["vartree"].dbapi
11461                 linkmap = real_vardb.linkmap
11462                 liblist = linkmap.listLibraryObjects()
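                      # Caches used below:
                      #   consumer_cache - library path -> files linked against it
                      #   provider_cache - consumer file -> its providers, keyed by soname
                      #   soname_cache   - library path -> soname
                      #   consumer_map   - removal candidate -> libraries it provides that
                      #                    are still needed by files it does not own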
11463                 consumer_cache = {}
11464                 provider_cache = {}
11465                 soname_cache = {}
11466                 consumer_map = {}
11467
11468                 writemsg_level(">>> Checking for lib consumers...\n")
11469
11470                 for pkg in cleanlist:
11471                         pkg_dblink = real_vardb._dblink(pkg.cpv)
11472                         provided_libs = set()
11473
11474                         for lib in liblist:
11475                                 if pkg_dblink.isowner(lib, myroot):
11476                                         provided_libs.add(lib)
11477
11478                         if not provided_libs:
11479                                 continue
11480
11481                         consumers = {}
11482                         for lib in provided_libs:
11483                                 lib_consumers = consumer_cache.get(lib)
11484                                 if lib_consumers is None:
11485                                         lib_consumers = linkmap.findConsumers(lib)
11486                                         consumer_cache[lib] = lib_consumers
11487                                 if lib_consumers:
11488                                         consumers[lib] = lib_consumers
11489
11490                         if not consumers:
11491                                 continue
11492
11493                         for lib, lib_consumers in consumers.items():
11494                                 for consumer_file in list(lib_consumers):
11495                                         if pkg_dblink.isowner(consumer_file, myroot):
11496                                                 lib_consumers.remove(consumer_file)
11497                                 if not lib_consumers:
11498                                         del consumers[lib]
11499
11500                         if not consumers:
11501                                 continue
11502
11503                         for lib, lib_consumers in consumers.iteritems():
11504
11505                                 soname = soname_cache.get(lib)
11506                                 if soname is None:
11507                                         soname = linkmap.getSoname(lib)
11508                                         soname_cache[lib] = soname
11509
11510                                 consumer_providers = []
11511                                 for lib_consumer in lib_consumers:
11512                                         providers = provider_cache.get(lib_consumer)
11513                                         if providers is None:
11514                                                 providers = linkmap.findProviders(lib_consumer)
11515                                                 provider_cache[lib_consumer] = providers
11516                                         if soname not in providers:
11517                                                 # Why does this happen?
11518                                                 continue
11519                                         consumer_providers.append(
11520                                                 (lib_consumer, providers[soname]))
11521
11522                                 consumers[lib] = consumer_providers
11523
11524                         consumer_map[pkg] = consumers
11525
11526                 if consumer_map:
11527
11528                         search_files = set()
11529                         for consumers in consumer_map.itervalues():
11530                                 for lib, consumer_providers in consumers.iteritems():
11531                                         for lib_consumer, providers in consumer_providers:
11532                                                 search_files.add(lib_consumer)
11533                                                 search_files.update(providers)
11534
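                              # Map every consumer and provider file back to the installed
                              # packages that own it, so a consumer can be disregarded when
                              # the library is also provided by a package that is not
                              # scheduled for removal.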
11535                         writemsg_level(">>> Assigning files to packages...\n")
11536                         file_owners = real_vardb._owners.getFileOwnerMap(search_files)
11537
11538                         for pkg, consumers in consumer_map.items():
11539                                 for lib, consumer_providers in consumers.items():
11540                                         lib_consumers = set()
11541
11542                                         for lib_consumer, providers in consumer_providers:
11543                                                 owner_set = file_owners.get(lib_consumer)
11544                                                 provider_dblinks = set()
11545                                                 provider_pkgs = set()
11546
11547                                                 if len(providers) > 1:
11548                                                         for provider in providers:
11549                                                                 provider_set = file_owners.get(provider)
11550                                                                 if provider_set is not None:
11551                                                                         provider_dblinks.update(provider_set)
11552
11553                                                 if len(provider_dblinks) > 1:
11554                                                         for provider_dblink in provider_dblinks:
11555                                                                 pkg_key = ("installed", myroot,
11556                                                                         provider_dblink.mycpv, "nomerge")
11557                                                                 if pkg_key not in clean_set:
11558                                                                         provider_pkgs.add(vardb.get(pkg_key))
11559
11560                                                 if provider_pkgs:
11561                                                         continue
11562
11563                                                 if owner_set is not None:
11564                                                         lib_consumers.update(owner_set)
11565
11566                                         for consumer_dblink in list(lib_consumers):
11567                                                 if ("installed", myroot, consumer_dblink.mycpv,
11568                                                         "nomerge") in clean_set:
11569                                                         lib_consumers.remove(consumer_dblink)
11570                                                         continue
11571
11572                                         if lib_consumers:
11573                                                 consumers[lib] = lib_consumers
11574                                         else:
11575                                                 del consumers[lib]
11576                                 if not consumers:
11577                                         del consumer_map[pkg]
11578
11579                 if consumer_map:
11580                         # TODO: Implement a package set for rebuilding consumer packages.
11581
11582                         msg = "In order to avoid breakage of link level " + \
11583                                 "dependencies, one or more packages will not be removed. " + \
11584                                 "This can be solved by rebuilding " + \
11585                                 "the packages that pulled them in."
11586
11587                         prefix = bad(" * ")
11588                         from textwrap import wrap
11589                         writemsg_level("".join(prefix + "%s\n" % line for \
11590                                 line in wrap(msg, 70)), level=logging.WARNING, noiselevel=-1)
11591
11592                         msg = []
11593                         for pkg, consumers in consumer_map.iteritems():
11594                                 unique_consumers = set(chain(*consumers.values()))
11595                                 unique_consumers = sorted(consumer.mycpv \
11596                                         for consumer in unique_consumers)
11597                                 msg.append("")
11598                                 msg.append("  %s pulled in by:" % (pkg.cpv,))
11599                                 for consumer in unique_consumers:
11600                                         msg.append("    %s" % (consumer,))
11601                         msg.append("")
11602                         writemsg_level("".join(prefix + "%s\n" % line for line in msg),
11603                                 level=logging.WARNING, noiselevel=-1)
11604
11605                         # Add lib providers to the graph as children of lib consumers,
11606                         # and also add any dependencies pulled in by the provider.
11607                         writemsg_level(">>> Adding lib providers to graph...\n")
11608
11609                         for pkg, consumers in consumer_map.iteritems():
11610                                 for consumer_dblink in set(chain(*consumers.values())):
11611                                         consumer_pkg = vardb.get(("installed", myroot,
11612                                                 consumer_dblink.mycpv, "nomerge"))
11613                                         if not resolver._add_pkg(pkg,
11614                                                 Dependency(parent=consumer_pkg,
11615                                                 priority=UnmergeDepPriority(runtime=True),
11616                                                 root=pkg.root)):
11617                                                 resolver.display_problems()
11618                                                 return 1
11619
11620                         writemsg_level("\nCalculating dependencies  ")
11621                         success = resolver._complete_graph()
11622                         writemsg_level("\b\b... done!\n")
11623                         resolver.display_problems()
11624                         if not success:
11625                                 return 1
11626                         if unresolved_deps():
11627                                 return 1
11628
11629                         graph = resolver.digraph.copy()
11630                         required_pkgs_total = 0
11631                         for node in graph:
11632                                 if isinstance(node, Package):
11633                                         required_pkgs_total += 1
11634                         cleanlist = create_cleanlist()
11635                         if not cleanlist:
11636                                 return 0
11637                         clean_set = set(cleanlist)
11638
11639                 # Use a topological sort to create an unmerge order such that
11640                 # each package is unmerged before its dependencies. This is
11641                 # necessary to avoid breaking things that may need to run
11642                 # during pkg_prerm or pkg_postrm phases.
11643
11644                 # Create a new graph to account for dependencies between the
11645                 # packages being unmerged.
11646                 graph = digraph()
11647                 del cleanlist[:]
11648
11649                 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
11650                 runtime = UnmergeDepPriority(runtime=True)
11651                 runtime_post = UnmergeDepPriority(runtime_post=True)
11652                 buildtime = UnmergeDepPriority(buildtime=True)
11653                 priority_map = {
11654                         "RDEPEND": runtime,
11655                         "PDEPEND": runtime_post,
11656                         "DEPEND": buildtime,
11657                 }
11658
11659                 for node in clean_set:
11660                         graph.add(node, None)
11661                         mydeps = []
11662                         node_use = node.metadata["USE"].split()
11663                         for dep_type in dep_keys:
11664                                 depstr = node.metadata[dep_type]
11665                                 if not depstr:
11666                                         continue
11667                                 try:
11668                                         portage.dep._dep_check_strict = False
11669                                         success, atoms = portage.dep_check(depstr, None, settings,
11670                                                 myuse=node_use, trees=resolver._graph_trees,
11671                                                 myroot=myroot)
11672                                 finally:
11673                                         portage.dep._dep_check_strict = True
11674                                 if not success:
11675                                         # Ignore invalid deps of packages that will
11676                                         # be uninstalled anyway.
11677                                         continue
11678
11679                                 priority = priority_map[dep_type]
11680                                 for atom in atoms:
11681                                         if not isinstance(atom, portage.dep.Atom):
11682                                                 # Ignore invalid atoms returned from dep_check().
11683                                                 continue
11684                                         if atom.blocker:
11685                                                 continue
11686                                         matches = vardb.match_pkgs(atom)
11687                                         if not matches:
11688                                                 continue
11689                                         for child_node in matches:
11690                                                 if child_node in clean_set:
11691                                                         graph.add(child_node, node, priority=priority)
11692
11693                 ordered = True
11694                 if len(graph.order) == len(graph.root_nodes()):
11695                         # If there are no dependencies between packages
11696                         # let unmerge() group them by cat/pn.
11697                         ordered = False
11698                         cleanlist = [pkg.cpv for pkg in graph.order]
11699                 else:
11700                         # Order nodes from lowest to highest overall reference count for
11701                         # optimal root node selection.
11702                         node_refcounts = {}
11703                         for node in graph.order:
11704                                 node_refcounts[node] = len(graph.parent_nodes(node))
11705                         def cmp_reference_count(node1, node2):
11706                                 return node_refcounts[node1] - node_refcounts[node2]
11707                         graph.order.sort(key=cmp_sort_key(cmp_reference_count))
11708
11709                         ignore_priority_range = [None]
11710                         ignore_priority_range.extend(
11711                                 xrange(UnmergeDepPriority.MIN, UnmergeDepPriority.MAX + 1))
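                              # Repeatedly pop root nodes to build the unmerge order. When
                              # circular dependencies leave no true root nodes, retry while
                              # ignoring progressively weaker dependency priorities.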
11712                         while not graph.empty():
11713                                 for ignore_priority in ignore_priority_range:
11714                                         nodes = graph.root_nodes(ignore_priority=ignore_priority)
11715                                         if nodes:
11716                                                 break
11717                                 if not nodes:
11718                                         raise AssertionError("no root nodes")
11719                                 if ignore_priority is not None:
11720                                         # Some deps have been dropped due to circular dependencies,
11721                                         # so only pop one node in order to minimize the number that
11722                                         # are dropped.
11723                                         del nodes[1:]
11724                                 for node in nodes:
11725                                         graph.remove(node)
11726                                         cleanlist.append(node.cpv)
11727
11728                 unmerge(root_config, myopts, "unmerge", cleanlist,
11729                         ldpath_mtimes, ordered=ordered)
11730
11731         if action == "prune":
11732                 return
11733
11734         if not cleanlist and "--quiet" in myopts:
11735                 return
11736
11737         print "Packages installed:   "+str(len(vardb.cpv_all()))
11738         print "Packages in world:    " + \
11739                 str(len(root_config.sets["world"].getAtoms()))
11740         print "Packages in system:   " + \
11741                 str(len(root_config.sets["system"].getAtoms()))
11742         print "Required packages:    "+str(required_pkgs_total)
11743         if "--pretend" in myopts:
11744                 print "Number to remove:     "+str(len(cleanlist))
11745         else:
11746                 print "Number removed:       "+str(len(cleanlist))
11747
11748 def resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
11749         """
11750         Construct a depgraph for the given resume list. This will raise
11751         PackageNotFound or depgraph.UnsatisfiedResumeDep when necessary.
11752         @rtype: tuple
11753         @returns: (success, depgraph, dropped_tasks)
11754         """
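              # Typical call pattern (illustrative; mirrors the use in action_build below):
              #   success, mydepgraph, dropped_tasks = resume_depgraph(
              #           settings, trees, mtimedb, myopts, myparams, spinner)
              #   if not success:
              #           mydepgraph.display_problems()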
11755         skip_masked = True
11756         skip_unsatisfied = True
11757         mergelist = mtimedb["resume"]["mergelist"]
11758         dropped_tasks = set()
11759         while True:
11760                 mydepgraph = depgraph(settings, trees,
11761                         myopts, myparams, spinner)
11762                 try:
11763                         success = mydepgraph.loadResumeCommand(mtimedb["resume"],
11764                                 skip_masked=skip_masked)
11765                 except depgraph.UnsatisfiedResumeDep, e:
11766                         if not skip_unsatisfied:
11767                                 raise
11768
11769                         graph = mydepgraph.digraph
11770                         unsatisfied_parents = dict((dep.parent, dep.parent) \
11771                                 for dep in e.value)
11772                         traversed_nodes = set()
11773                         unsatisfied_stack = list(unsatisfied_parents)
11774                         while unsatisfied_stack:
11775                                 pkg = unsatisfied_stack.pop()
11776                                 if pkg in traversed_nodes:
11777                                         continue
11778                                 traversed_nodes.add(pkg)
11779
11780                                 # If this package was pulled in by a parent
11781                                 # package scheduled for merge, removing this
11782                                 # package may cause the parent package's
11783                                 # dependency to become unsatisfied.
11784                                 for parent_node in graph.parent_nodes(pkg):
11785                                         if not isinstance(parent_node, Package) \
11786                                                 or parent_node.operation not in ("merge", "nomerge"):
11787                                                 continue
11788                                         unsatisfied = \
11789                                                 graph.child_nodes(parent_node,
11790                                                 ignore_priority=DepPrioritySatisfiedRange.ignore_soft)
11791                                         if pkg in unsatisfied:
11792                                                 unsatisfied_parents[parent_node] = parent_node
11793                                                 unsatisfied_stack.append(parent_node)
11794
11795                         pruned_mergelist = []
11796                         for x in mergelist:
11797                                 if isinstance(x, list) and \
11798                                         tuple(x) not in unsatisfied_parents:
11799                                         pruned_mergelist.append(x)
11800
11801                         # If the mergelist doesn't shrink then this loop is infinite.
11802                         if len(pruned_mergelist) == len(mergelist):
11803                                 # This happens if a package can't be dropped because
11804                                 # it's already installed, but it has unsatisfied PDEPEND.
11805                                 raise
11806                         mergelist[:] = pruned_mergelist
11807
11808                         # Exclude installed packages that have been removed from the graph due
11809                         # to failure to build/install runtime dependencies after the dependent
11810                         # package has already been installed.
11811                         dropped_tasks.update(pkg for pkg in \
11812                                 unsatisfied_parents if pkg.operation != "nomerge")
11813                         mydepgraph.break_refs(unsatisfied_parents)
11814
11815                         del e, graph, traversed_nodes, \
11816                                 unsatisfied_parents, unsatisfied_stack
11817                         continue
11818                 else:
11819                         break
11820         return (success, mydepgraph, dropped_tasks)
11821
11822 def action_build(settings, trees, mtimedb,
11823         myopts, myaction, myfiles, spinner):
11824
11825         # validate the state of the resume data
11826         # so that we can make assumptions later.
11827         for k in ("resume", "resume_backup"):
11828                 if k not in mtimedb:
11829                         continue
11830                 resume_data = mtimedb[k]
11831                 if not isinstance(resume_data, dict):
11832                         del mtimedb[k]
11833                         continue
11834                 mergelist = resume_data.get("mergelist")
11835                 if not isinstance(mergelist, list):
11836                         del mtimedb[k]
11837                         continue
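                      # Each valid mergelist entry is a 4-element list of the form
                      # [pkg_type, pkg_root, pkg_key, pkg_action], e.g. (illustrative
                      # values) ["ebuild", "/", "sys-apps/foo-1.0", "merge"].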
11838                 for x in mergelist:
11839                         if not (isinstance(x, list) and len(x) == 4):
11840                                 continue
11841                         pkg_type, pkg_root, pkg_key, pkg_action = x
11842                         if pkg_root not in trees:
11843                                 # Current $ROOT setting differs,
11844                                 # so the list must be stale.
11845                                 mergelist = None
11846                                 break
11847                 if not mergelist:
11848                         del mtimedb[k]
11849                         continue
11850                 resume_opts = resume_data.get("myopts")
11851                 if not isinstance(resume_opts, (dict, list)):
11852                         del mtimedb[k]
11853                         continue
11854                 favorites = resume_data.get("favorites")
11855                 if not isinstance(favorites, list):
11856                         del mtimedb[k]
11857                         continue
11858
11859         resume = False
11860         if "--resume" in myopts and \
11861                 ("resume" in mtimedb or
11862                 "resume_backup" in mtimedb):
11863                 resume = True
11864                 if "resume" not in mtimedb:
11865                         mtimedb["resume"] = mtimedb["resume_backup"]
11866                         del mtimedb["resume_backup"]
11867                         mtimedb.commit()
11868                 # "myopts" may be stored as a list for backward compatibility.
11869                 resume_opts = mtimedb["resume"].get("myopts", [])
11870                 if isinstance(resume_opts, list):
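                              # e.g. (illustrative) ["--deep", "--update"] becomes
                              # {"--deep": True, "--update": True}.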
11871                         resume_opts = dict((k,True) for k in resume_opts)
11872                 for opt in ("--ask", "--color", "--skipfirst", "--tree"):
11873                         resume_opts.pop(opt, None)
11874
11875                 # Current options always override resume_opts.
11876                 resume_opts.update(myopts)
11877                 myopts.clear()
11878                 myopts.update(resume_opts)
11879
11880                 if "--debug" in myopts:
11881                         writemsg_level("myopts %s\n" % (myopts,))
11882
11883                 # Adjust config according to options of the command being resumed.
11884                 for myroot in trees:
11885                         mysettings = trees[myroot]["vartree"].settings
11886                         mysettings.unlock()
11887                         adjust_config(myopts, mysettings)
11888                         mysettings.lock()
11889                         del myroot, mysettings
11890
11891         ldpath_mtimes = mtimedb["ldpath"]
11892         favorites=[]
11893         merge_count = 0
11894         buildpkgonly = "--buildpkgonly" in myopts
11895         pretend = "--pretend" in myopts
11896         fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
11897         ask = "--ask" in myopts
11898         nodeps = "--nodeps" in myopts
11899         oneshot = "--oneshot" in myopts or "--onlydeps" in myopts
11900         tree = "--tree" in myopts
11901         if nodeps and tree:
11902                 tree = False
11903                 del myopts["--tree"]
11904                 portage.writemsg(colorize("WARN", " * ") + \
11905                         "--tree is broken with --nodeps. Disabling...\n")
11906         debug = "--debug" in myopts
11907         verbose = "--verbose" in myopts
11908         quiet = "--quiet" in myopts
11909         if pretend or fetchonly:
11910                 # make the mtimedb readonly
11911                 mtimedb.filename = None
11912         if '--digest' in myopts or 'digest' in settings.features:
11913                 if '--digest' in myopts:
11914                         msg = "The --digest option"
11915                 else:
11916                         msg = "The FEATURES=digest setting"
11917
11918                 msg += " can prevent corruption from being" + \
11919                         " noticed. The `repoman manifest` command is the preferred" + \
11920                         " way to generate manifests and it is capable of doing an" + \
11921                         " entire repository or category at once."
11922                 prefix = bad(" * ")
11923                 writemsg(prefix + "\n")
11924                 from textwrap import wrap
11925                 for line in wrap(msg, 72):
11926                         writemsg("%s%s\n" % (prefix, line))
11927                 writemsg(prefix + "\n")
11928
11929         if "--quiet" not in myopts and \
11930                 ("--pretend" in myopts or "--ask" in myopts or \
11931                 "--tree" in myopts or "--verbose" in myopts):
11932                 action = ""
11933                 if "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
11934                         action = "fetched"
11935                 elif "--buildpkgonly" in myopts:
11936                         action = "built"
11937                 else:
11938                         action = "merged"
11939                 if "--tree" in myopts and action != "fetched": # Tree doesn't work with fetching
11940                         print
11941                         print darkgreen("These are the packages that would be %s, in reverse order:") % action
11942                         print
11943                 else:
11944                         print
11945                         print darkgreen("These are the packages that would be %s, in order:") % action
11946                         print
11947
11948         show_spinner = "--quiet" not in myopts and "--nodeps" not in myopts
11949         if not show_spinner:
11950                 spinner.update = spinner.update_quiet
11951
11952         if resume:
11953                 favorites = mtimedb["resume"].get("favorites")
11954                 if not isinstance(favorites, list):
11955                         favorites = []
11956
11957                 if show_spinner:
11958                         print "Calculating dependencies  ",
11959                 myparams = create_depgraph_params(myopts, myaction)
11960
11961                 resume_data = mtimedb["resume"]
11962                 mergelist = resume_data["mergelist"]
11963                 if mergelist and "--skipfirst" in myopts:
11964                         for i, task in enumerate(mergelist):
11965                                 if isinstance(task, list) and \
11966                                         task and task[-1] == "merge":
11967                                         del mergelist[i]
11968                                         break
11969
11970                 success = False
11971                 mydepgraph = None
11972                 try:
11973                         success, mydepgraph, dropped_tasks = resume_depgraph(
11974                                 settings, trees, mtimedb, myopts, myparams, spinner)
11975                 except (portage.exception.PackageNotFound,
11976                         depgraph.UnsatisfiedResumeDep), e:
11977                         if isinstance(e, depgraph.UnsatisfiedResumeDep):
11978                                 mydepgraph = e.depgraph
11979                         if show_spinner:
11980                                 print
11981                         from textwrap import wrap
11982                         from portage.output import EOutput
11983                         out = EOutput()
11984
11985                         resume_data = mtimedb["resume"]
11986                         mergelist = resume_data.get("mergelist")
11987                         if not isinstance(mergelist, list):
11988                                 mergelist = []
11989                         if mergelist and (debug or (verbose and not quiet)):
11990                                 out.eerror("Invalid resume list:")
11991                                 out.eerror("")
11992                                 indent = "  "
11993                                 for task in mergelist:
11994                                         if isinstance(task, list):
11995                                                 out.eerror(indent + str(tuple(task)))
11996                                 out.eerror("")
11997
11998                         if isinstance(e, depgraph.UnsatisfiedResumeDep):
11999                                 out.eerror("One or more packages are either masked or " + \
12000                                         "have missing dependencies:")
12001                                 out.eerror("")
12002                                 indent = "  "
12003                                 for dep in e.value:
12004                                         if dep.atom is None:
12005                                                 out.eerror(indent + "Masked package:")
12006                                                 out.eerror(2 * indent + str(dep.parent))
12007                                                 out.eerror("")
12008                                         else:
12009                                                 out.eerror(indent + str(dep.atom) + " pulled in by:")
12010                                                 out.eerror(2 * indent + str(dep.parent))
12011                                                 out.eerror("")
12012                                 msg = "The resume list contains packages " + \
12013                                         "that are either masked or have " + \
12014                                         "unsatisfied dependencies. " + \
12015                                         "Please restart/continue " + \
12016                                         "the operation manually, or use --skipfirst " + \
12017                                         "to skip the first package in the list and " + \
12018                                         "any other packages that may be " + \
12019                                         "masked or have missing dependencies."
12020                                 for line in wrap(msg, 72):
12021                                         out.eerror(line)
12022                         elif isinstance(e, portage.exception.PackageNotFound):
12023                                 out.eerror("An expected package is " + \
12024                                         "not available: %s" % str(e))
12025                                 out.eerror("")
12026                                 msg = "The resume list contains one or more " + \
12027                                         "packages that are no longer " + \
12028                                         "available. Please restart/continue " + \
12029                                         "the operation manually."
12030                                 for line in wrap(msg, 72):
12031                                         out.eerror(line)
12032                 else:
12033                         if show_spinner:
12034                                 print "\b\b... done!"
12035
12036                 if success:
12037                         if dropped_tasks:
12038                                 portage.writemsg("!!! One or more packages have been " + \
12039                                         "dropped due to\n" + \
12040                                         "!!! masking or unsatisfied dependencies:\n\n",
12041                                         noiselevel=-1)
12042                                 for task in dropped_tasks:
12043                                         portage.writemsg("  " + str(task) + "\n", noiselevel=-1)
12044                                 portage.writemsg("\n", noiselevel=-1)
12045                         del dropped_tasks
12046                 else:
12047                         if mydepgraph is not None:
12048                                 mydepgraph.display_problems()
12049                         if not (ask or pretend):
12050                                 # delete the current list and also the backup
12051                                 # since it's probably stale too.
12052                                 for k in ("resume", "resume_backup"):
12053                                         mtimedb.pop(k, None)
12054                                 mtimedb.commit()
12055
12056                         return 1
12057         else:
12058                 if ("--resume" in myopts):
12059                         print darkgreen("emerge: It seems we have nothing to resume...")
12060                         return os.EX_OK
12061
12062                 myparams = create_depgraph_params(myopts, myaction)
12063                 if "--quiet" not in myopts and "--nodeps" not in myopts:
12064                         print "Calculating dependencies  ",
12065                         sys.stdout.flush()
12066                 mydepgraph = depgraph(settings, trees, myopts, myparams, spinner)
12067                 try:
12068                         retval, favorites = mydepgraph.select_files(myfiles)
12069                 except portage.exception.PackageNotFound, e:
12070                         portage.writemsg("\n!!! %s\n" % str(e), noiselevel=-1)
12071                         return 1
12072                 except portage.exception.PackageSetNotFound, e:
12073                         root_config = trees[settings["ROOT"]]["root_config"]
12074                         display_missing_pkg_set(root_config, e.value)
12075                         return 1
12076                 if show_spinner:
12077                         print "\b\b... done!"
12078                 if not retval:
12079                         mydepgraph.display_problems()
12080                         return 1
12081
12082         if "--pretend" not in myopts and \
12083                 ("--ask" in myopts or "--tree" in myopts or \
12084                 "--verbose" in myopts) and \
12085                 not ("--quiet" in myopts and "--ask" not in myopts):
12086                 if "--resume" in myopts:
12087                         mymergelist = mydepgraph.altlist()
12088                         if len(mymergelist) == 0:
12089                                 print colorize("INFORM", "emerge: It seems we have nothing to resume...")
12090                                 return os.EX_OK
12091                         favorites = mtimedb["resume"]["favorites"]
12092                         retval = mydepgraph.display(
12093                                 mydepgraph.altlist(reversed=tree),
12094                                 favorites=favorites)
12095                         mydepgraph.display_problems()
12096                         if retval != os.EX_OK:
12097                                 return retval
12098                         prompt="Would you like to resume merging these packages?"
12099                 else:
12100                         retval = mydepgraph.display(
12101                                 mydepgraph.altlist(reversed=("--tree" in myopts)),
12102                                 favorites=favorites)
12103                         mydepgraph.display_problems()
12104                         if retval != os.EX_OK:
12105                                 return retval
12106                         mergecount=0
12107                         for x in mydepgraph.altlist():
12108                                 if isinstance(x, Package) and x.operation == "merge":
12109                                         mergecount += 1
12110
12111                         if mergecount==0:
12112                                 sets = trees[settings["ROOT"]]["root_config"].sets
12113                                 world_candidates = None
12114                                 if "--noreplace" in myopts and \
12115                                         not oneshot and favorites:
12116                                         # Sets that are not world candidates are filtered
12117                                         # out here since the favorites list needs to be
12118                                         # complete for depgraph.loadResumeCommand() to
12119                                         # operate correctly.
12120                                         world_candidates = [x for x in favorites \
12121                                                 if not (x.startswith(SETPREFIX) and \
12122                                                 not sets[x[1:]].world_candidate)]
12123                                 if "--noreplace" in myopts and \
12124                                         not oneshot and world_candidates:
12125                                         print
12126                                         for x in world_candidates:
12127                                                 print " %s %s" % (good("*"), x)
12128                                         prompt="Would you like to add these packages to your world favorites?"
12129                                 elif settings["AUTOCLEAN"] and "yes"==settings["AUTOCLEAN"]:
12130                                         prompt="Nothing to merge; would you like to auto-clean packages?"
12131                                 else:
12132                                         print
12133                                         print "Nothing to merge; quitting."
12134                                         print
12135                                         return os.EX_OK
12136                         elif "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
12137                                 prompt="Would you like to fetch the source files for these packages?"
12138                         else:
12139                                 prompt="Would you like to merge these packages?"
12140                 print
12141                 if "--ask" in myopts and userquery(prompt) == "No":
12142                         print
12143                         print "Quitting."
12144                         print
12145                         return os.EX_OK
12146                 # Don't ask again (e.g. when auto-cleaning packages after merge)
12147                 myopts.pop("--ask", None)
12148
12149         if ("--pretend" in myopts) and not ("--fetchonly" in myopts or "--fetch-all-uri" in myopts):
12150                 if ("--resume" in myopts):
12151                         mymergelist = mydepgraph.altlist()
12152                         if len(mymergelist) == 0:
12153                                 print colorize("INFORM", "emerge: It seems we have nothing to resume...")
12154                                 return os.EX_OK
12155                         favorites = mtimedb["resume"]["favorites"]
12156                         retval = mydepgraph.display(
12157                                 mydepgraph.altlist(reversed=tree),
12158                                 favorites=favorites)
12159                         mydepgraph.display_problems()
12160                         if retval != os.EX_OK:
12161                                 return retval
12162                 else:
12163                         retval = mydepgraph.display(
12164                                 mydepgraph.altlist(reversed=("--tree" in myopts)),
12165                                 favorites=favorites)
12166                         mydepgraph.display_problems()
12167                         if retval != os.EX_OK:
12168                                 return retval
12169                         if "--buildpkgonly" in myopts:
12170                                 graph_copy = mydepgraph.digraph.clone()
12171                                 removed_nodes = set()
12172                                 for node in graph_copy:
12173                                         if not isinstance(node, Package) or \
12174                                                 node.operation == "nomerge":
12175                                                 removed_nodes.add(node)
12176                                 graph_copy.difference_update(removed_nodes)
12177                                 if not graph_copy.hasallzeros(ignore_priority = \
12178                                         DepPrioritySatisfiedRange.ignore_medium):
12179                                         print "\n!!! --buildpkgonly requires all dependencies to be merged."
12180                                         print "!!! You have to merge the dependencies before you can build this package.\n"
12181                                         return 1
12182         else:
12183                 if "--buildpkgonly" in myopts:
12184                         graph_copy = mydepgraph.digraph.clone()
12185                         removed_nodes = set()
12186                         for node in graph_copy:
12187                                 if not isinstance(node, Package) or \
12188                                         node.operation == "nomerge":
12189                                         removed_nodes.add(node)
12190                         graph_copy.difference_update(removed_nodes)
12191                         if not graph_copy.hasallzeros(ignore_priority = \
12192                                 DepPrioritySatisfiedRange.ignore_medium):
12193                                 print "\n!!! --buildpkgonly requires all dependencies to be merged."
12194                                 print "!!! Cannot merge requested packages. Merge deps and try again.\n"
12195                                 return 1
12196
12197                 if ("--resume" in myopts):
12198                         favorites=mtimedb["resume"]["favorites"]
12199                         mymergelist = mydepgraph.altlist()
12200                         mydepgraph.break_refs(mymergelist)
12201                         mergetask = Scheduler(settings, trees, mtimedb, myopts,
12202                                 spinner, mymergelist, favorites, mydepgraph.schedulerGraph())
12203                         del mydepgraph, mymergelist
12204                         clear_caches(trees)
12205
12206                         retval = mergetask.merge()
12207                         merge_count = mergetask.curval
12208                 else:
12209                         if "resume" in mtimedb and \
12210                         "mergelist" in mtimedb["resume"] and \
12211                         len(mtimedb["resume"]["mergelist"]) > 1:
12212                                 mtimedb["resume_backup"] = mtimedb["resume"]
12213                                 del mtimedb["resume"]
12214                                 mtimedb.commit()
12215                         mtimedb["resume"]={}
12216                         # Stored as a dict starting with portage-2.1.6_rc1, and supported
12217                         # by >=portage-2.1.3_rc8. Versions <portage-2.1.3_rc8 only support
12218                         # a list type for options.
12219                         mtimedb["resume"]["myopts"] = myopts.copy()
12220
12221                         # Convert Atom instances to plain str.
12222                         mtimedb["resume"]["favorites"] = [str(x) for x in favorites]
12223
12224                         pkglist = mydepgraph.altlist()
12225                         mydepgraph.saveNomergeFavorites()
12226                         mydepgraph.break_refs(pkglist)
12227                         mergetask = Scheduler(settings, trees, mtimedb, myopts,
12228                                 spinner, pkglist, favorites, mydepgraph.schedulerGraph())
12229                         del mydepgraph, pkglist
12230                         clear_caches(trees)
12231
12232                         retval = mergetask.merge()
12233                         merge_count = mergetask.curval
12234
12235                 if retval == os.EX_OK and not (buildpkgonly or fetchonly or pretend):
12236                         if "yes" == settings.get("AUTOCLEAN"):
12237                                 portage.writemsg_stdout(">>> Auto-cleaning packages...\n")
12238                                 unmerge(trees[settings["ROOT"]]["root_config"],
12239                                         myopts, "clean", [],
12240                                         ldpath_mtimes, autoclean=1)
12241                         else:
12242                                 portage.writemsg_stdout(colorize("WARN", "WARNING:")
12243                                         + " AUTOCLEAN is disabled.  This can cause serious"
12244                                         + " problems due to overlapping packages.\n")
12245                         trees[settings["ROOT"]]["vartree"].dbapi.plib_registry.pruneNonExisting()
12246
12247                 return retval
12248
12249 def multiple_actions(action1, action2):
12250         sys.stderr.write("\n!!! Multiple actions requested... Please choose one only.\n")
12251         sys.stderr.write("!!! '%s' or '%s'\n\n" % (action1, action2))
12252         sys.exit(1)
12253
12254 def insert_optional_args(args):
12255         """
12256         Parse optional arguments and insert a value if one has
12257         not been provided. This is done before feeding the args
12258         to the optparse parser since that parser does not support
12259         this feature natively.
12260         """
12261
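              # Illustrative transformations performed below (not exhaustive):
              #   ["-j4"]             -> ["--jobs", "4"]
              #   ["-j"]              -> ["--jobs", "True"]
              #   ["--deselect"]      -> ["--deselect", "True"]
              #   ["--deselect", "n"] -> ["--deselect", "n"]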
12262         new_args = []
12263         jobs_opts = ("-j", "--jobs")
12264         default_arg_opts = {
12265                 '--deselect'   : ('n',),
12266                 '--root-deps'  : ('rdeps',),
12267         }
12268         arg_stack = args[:]
12269         arg_stack.reverse()
12270         while arg_stack:
12271                 arg = arg_stack.pop()
12272
12273                 default_arg_choices = default_arg_opts.get(arg)
12274                 if default_arg_choices is not None:
12275                         new_args.append(arg)
12276                         if arg_stack and arg_stack[-1] in default_arg_choices:
12277                                 new_args.append(arg_stack.pop())
12278                         else:
12279                                 # insert default argument
12280                                 new_args.append('True')
12281                         continue
12282
12283                 short_job_opt = bool("j" in arg and arg[:1] == "-" and arg[:2] != "--")
12284                 if not (short_job_opt or arg in jobs_opts):
12285                         new_args.append(arg)
12286                         continue
12287
12288                 # Insert an empty placeholder in order to
12289                 # satisfy the requirements of optparse.
12290
12291                 new_args.append("--jobs")
12292                 job_count = None
12293                 saved_opts = None
12294                 if short_job_opt and len(arg) > 2:
12295                         if arg[:2] == "-j":
12296                                 try:
12297                                         job_count = int(arg[2:])
12298                                 except ValueError:
12299                                         saved_opts = arg[2:]
12300                         else:
12301                                 job_count = "True"
12302                                 saved_opts = arg[1:].replace("j", "")
12303
12304                 if job_count is None and arg_stack:
12305                         try:
12306                                 job_count = int(arg_stack[-1])
12307                         except ValueError:
12308                                 pass
12309                         else:
12310                                 # Discard the job count from the stack
12311                                 # since we're consuming it here.
12312                                 arg_stack.pop()
12313
12314                 if job_count is None:
12315                         # unlimited number of jobs
12316                         new_args.append("True")
12317                 else:
12318                         new_args.append(str(job_count))
12319
12320                 if saved_opts is not None:
12321                         new_args.append("-" + saved_opts)
12322
12323         return new_args
12324
12325 def parse_opts(tmpcmdline, silent=False):
12326         myaction=None
12327         myopts = {}
12328         myfiles=[]
12329
12330         global actions, options, shortmapping
12331
12332         longopt_aliases = {"--cols":"--columns", "--skip-first":"--skipfirst"}
12333         argument_options = {
12334                 "--config-root": {
12335                         "help":"specify the location for portage configuration files",
12336                         "action":"store"
12337                 },
12338                 "--color": {
12339                         "help":"enable or disable color output",
12340                         "type":"choice",
12341                         "choices":("y", "n")
12342                 },
12343
12344                 "--deselect": {
12345                         "help"    : "remove atoms from the world file",
12346                         "type"    : "choice",
12347                         "choices" : ("True", "n")
12348                 },
12349
12350                 "--jobs": {
12351
12352                         "help"   : "Specifies the number of packages to build " + \
12353                                 "simultaneously.",
12354
12355                         "action" : "store"
12356                 },
12357
12358                 "--load-average": {
12359
12360                         "help"   :"Specifies that no new builds should be started " + \
12361                                 "if there are other builds running and the load average " + \
12362                                 "is at least LOAD (a floating-point number).",
12363
12364                         "action" : "store"
12365                 },
12366
12367                 "--with-bdeps": {
12368                         "help":"include unnecessary build time dependencies",
12369                         "type":"choice",
12370                         "choices":("y", "n")
12371                 },
12372                 "--reinstall": {
12373                         "help":"specify conditions to trigger package reinstallation",
12374                         "type":"choice",
12375                         "choices":["changed-use"]
12376                 },
12377                 "--root": {
12378                         "help"   : "specify the target root filesystem for merging packages",
12379                         "action" : "store"
12380                 },
12381
12382                 "--root-deps": {
12383                         "help"    : "modify interpretation of dependencies",
12384                         "type"    : "choice",
12385                         "choices" :("True", "rdeps")
12386                 },
12387         }
12388
12389         from optparse import OptionParser
12390         parser = OptionParser()
12391         if parser.has_option("--help"):
12392                 parser.remove_option("--help")
12393
12394         for action_opt in actions:
12395                 parser.add_option("--" + action_opt, action="store_true",
12396                         dest=action_opt.replace("-", "_"), default=False)
12397         for myopt in options:
12398                 parser.add_option(myopt, action="store_true",
12399                         dest=myopt.lstrip("--").replace("-", "_"), default=False)
12400         for shortopt, longopt in shortmapping.iteritems():
12401                 parser.add_option("-" + shortopt, action="store_true",
12402                         dest=longopt.lstrip("--").replace("-", "_"), default=False)
12403         for myalias, myopt in longopt_aliases.iteritems():
12404                 parser.add_option(myalias, action="store_true",
12405                         dest=myopt.lstrip("--").replace("-", "_"), default=False)
12406
12407         for myopt, kwargs in argument_options.iteritems():
12408                 parser.add_option(myopt,
12409                         dest=myopt.lstrip("--").replace("-", "_"), **kwargs)
12410
12411         tmpcmdline = insert_optional_args(tmpcmdline)
12412
12413         myoptions, myargs = parser.parse_args(args=tmpcmdline)
12414
12415         if myoptions.deselect == "True":
12416                 myoptions.deselect = True
12417
12418         if myoptions.root_deps == "True":
12419                 myoptions.root_deps = True
12420
12421         if myoptions.jobs:
12422                 jobs = None
12423                 if myoptions.jobs == "True":
12424                         jobs = True
12425                 else:
12426                         try:
12427                                 jobs = int(myoptions.jobs)
12428                         except ValueError:
12429                                 jobs = -1
12430
12431                 if jobs is not True and \
12432                         jobs < 1:
12433                         jobs = None
12434                         if not silent:
12435                                 writemsg("!!! Invalid --jobs parameter: '%s'\n" % \
12436                                         (myoptions.jobs,), noiselevel=-1)
12437
12438                 myoptions.jobs = jobs
12439
12440         if myoptions.load_average:
12441                 try:
12442                         load_average = float(myoptions.load_average)
12443                 except ValueError:
12444                         load_average = 0.0
12445
12446                 if load_average <= 0.0:
12447                         load_average = None
12448                         if not silent:
12449                                 writemsg("!!! Invalid --load-average parameter: '%s'\n" % \
12450                                         (myoptions.load_average,), noiselevel=-1)
12451
12452                 myoptions.load_average = load_average
12453
12454         for myopt in options:
12455                 v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"))
12456                 if v:
12457                         myopts[myopt] = True
12458
12459         for myopt in argument_options:
12460                 v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"), None)
12461                 if v is not None:
12462                         myopts[myopt] = v
12463
12464         if myoptions.searchdesc:
12465                 myoptions.search = True
12466
12467         for action_opt in actions:
12468                 v = getattr(myoptions, action_opt.replace("-", "_"))
12469                 if v:
12470                         if myaction:
12471                                 multiple_actions(myaction, action_opt)
12472                                 sys.exit(1)
12473                         myaction = action_opt
12474
12475         if myaction is None and myoptions.deselect is True:
12476                 myaction = 'deselect'
12477
12478         myfiles += myargs
12479
12480         return myaction, myopts, myfiles
12481
12482 def validate_ebuild_environment(trees):
12483         for myroot in trees:
12484                 settings = trees[myroot]["vartree"].settings
12485                 settings.validate()
12486
12487 def clear_caches(trees):
12488         for d in trees.itervalues():
12489                 d["porttree"].dbapi.melt()
12490                 d["porttree"].dbapi._aux_cache.clear()
12491                 d["bintree"].dbapi._aux_cache.clear()
12492                 d["bintree"].dbapi._clear_cache()
12493                 d["vartree"].dbapi.linkmap._clear_cache()
12494         portage.dircache.clear()
12495         gc.collect()
12496
12497 def load_emerge_config(trees=None):
12498         kwargs = {}
12499         for k, envvar in (("config_root", "PORTAGE_CONFIGROOT"), ("target_root", "ROOT")):
12500                 v = os.environ.get(envvar, None)
12501                 if v and v.strip():
12502                         kwargs[k] = v
12503         trees = portage.create_trees(trees=trees, **kwargs)
12504
12505         for root, root_trees in trees.iteritems():
12506                 settings = root_trees["vartree"].settings
12507                 setconfig = load_default_config(settings, root_trees)
12508                 root_trees["root_config"] = RootConfig(settings, root_trees, setconfig)
12509
12510         settings = trees["/"]["vartree"].settings
12511
12512         for myroot in trees:
12513                 if myroot != "/":
12514                         settings = trees[myroot]["vartree"].settings
12515                         break
12516
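              # With the usual value of portage.CACHE_PATH this typically resolves
              # to /var/cache/edb/mtimedb (illustrative; depends on installed constants).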
12517         mtimedbfile = os.path.join("/", portage.CACHE_PATH.lstrip(os.path.sep), "mtimedb")
12518         mtimedb = portage.MtimeDB(mtimedbfile)
12519
12520         return settings, trees, mtimedb
12521
12522 def adjust_config(myopts, settings):
12523         """Make emerge specific adjustments to the config."""
12524
12525         # To enhance usability, make some vars case insensitive by forcing them to
12526         # lower case.
12527         for myvar in ("AUTOCLEAN", "NOCOLOR"):
12528                 if myvar in settings:
12529                         settings[myvar] = settings[myvar].lower()
12530                         settings.backup_changes(myvar)
12531         del myvar
12532
12533         # Kill noauto as it will break merges otherwise.
12534         if "noauto" in settings.features:
12535                 settings.features.remove('noauto')
12536                 settings['FEATURES'] = ' '.join(sorted(settings.features))
12537                 settings.backup_changes("FEATURES")
12538
12539         CLEAN_DELAY = 5
12540         try:
12541                 CLEAN_DELAY = int(settings.get("CLEAN_DELAY", str(CLEAN_DELAY)))
12542         except ValueError, e:
12543                 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
12544                 portage.writemsg("!!! Unable to parse integer: CLEAN_DELAY='%s'\n" % \
12545                         settings["CLEAN_DELAY"], noiselevel=-1)
12546         settings["CLEAN_DELAY"] = str(CLEAN_DELAY)
12547         settings.backup_changes("CLEAN_DELAY")
12548
12549         EMERGE_WARNING_DELAY = 10
12550         try:
12551                 EMERGE_WARNING_DELAY = int(settings.get(
12552                         "EMERGE_WARNING_DELAY", str(EMERGE_WARNING_DELAY)))
12553         except ValueError, e:
12554                 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
12555                 portage.writemsg("!!! Unable to parse integer: EMERGE_WARNING_DELAY='%s'\n" % \
12556                         settings["EMERGE_WARNING_DELAY"], noiselevel=-1)
12557         settings["EMERGE_WARNING_DELAY"] = str(EMERGE_WARNING_DELAY)
12558         settings.backup_changes("EMERGE_WARNING_DELAY")
12559
12560         if "--quiet" in myopts:
12561                 settings["PORTAGE_QUIET"]="1"
12562                 settings.backup_changes("PORTAGE_QUIET")
12563
12564         if "--verbose" in myopts:
12565                 settings["PORTAGE_VERBOSE"] = "1"
12566                 settings.backup_changes("PORTAGE_VERBOSE")
12567
12568         # Set so that configs will be merged regardless of remembered status
12569         if ("--noconfmem" in myopts):
12570                 settings["NOCONFMEM"]="1"
12571                 settings.backup_changes("NOCONFMEM")
12572
12573         # Set various debug markers... They should be merged somehow.
12574         PORTAGE_DEBUG = 0
12575         try:
12576                 PORTAGE_DEBUG = int(settings.get("PORTAGE_DEBUG", str(PORTAGE_DEBUG)))
12577                 if PORTAGE_DEBUG not in (0, 1):
12578                         portage.writemsg("!!! Invalid value: PORTAGE_DEBUG='%i'\n" % \
12579                                 PORTAGE_DEBUG, noiselevel=-1)
12580                         portage.writemsg("!!! PORTAGE_DEBUG must be either 0 or 1\n",
12581                                 noiselevel=-1)
12582                         PORTAGE_DEBUG = 0
12583         except ValueError, e:
12584                 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
12585                 portage.writemsg("!!! Unable to parse integer: PORTAGE_DEBUG='%s'\n" %\
12586                         settings["PORTAGE_DEBUG"], noiselevel=-1)
12587                 del e
12588         if "--debug" in myopts:
12589                 PORTAGE_DEBUG = 1
12590         settings["PORTAGE_DEBUG"] = str(PORTAGE_DEBUG)
12591         settings.backup_changes("PORTAGE_DEBUG")
12592
12593         if settings.get("NOCOLOR") not in ("yes","true"):
12594                 portage.output.havecolor = 1
12595
12596         """The explicit --color < y | n > option overrides the NOCOLOR environment
12597         variable and stdout auto-detection."""
12598         if "--color" in myopts:
12599                 if "y" == myopts["--color"]:
12600                         portage.output.havecolor = 1
12601                         settings["NOCOLOR"] = "false"
12602                 else:
12603                         portage.output.havecolor = 0
12604                         settings["NOCOLOR"] = "true"
12605                 settings.backup_changes("NOCOLOR")
12606         elif not sys.stdout.isatty() and settings.get("NOCOLOR") != "no":
12607                 portage.output.havecolor = 0
12608                 settings["NOCOLOR"] = "true"
12609                 settings.backup_changes("NOCOLOR")
12610
12611 def apply_priorities(settings):
12612         ionice(settings)
12613         nice(settings)
12614
12615 def nice(settings):
12616         try:
12617                 os.nice(int(settings.get("PORTAGE_NICENESS", "0")))
12618         except (OSError, ValueError), e:
12619                 out = portage.output.EOutput()
12620                 out.eerror("Failed to change nice value to '%s'" % \
12621                         settings["PORTAGE_NICENESS"])
12622                 out.eerror("%s\n" % str(e))
12623
12624 def ionice(settings):
12625
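              # PORTAGE_IONICE_COMMAND comes from make.conf; a typical setting (see
              # the make.conf(5) man page) looks roughly like:
              #   PORTAGE_IONICE_COMMAND="ionice -c 3 -p \${PID}"
              # ${PID} is substituted with the current process id via varexpand() below.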
12626         ionice_cmd = settings.get("PORTAGE_IONICE_COMMAND")
12627         if ionice_cmd:
12628                 ionice_cmd = shlex.split(ionice_cmd)
12629         if not ionice_cmd:
12630                 return
12631
12632         from portage.util import varexpand
12633         variables = {"PID" : str(os.getpid())}
12634         cmd = [varexpand(x, mydict=variables) for x in ionice_cmd]
12635
12636         try:
12637                 rval = portage.process.spawn(cmd, env=os.environ)
12638         except portage.exception.CommandNotFound:
12639                 # The OS kernel probably doesn't support ionice,
12640                 # so return silently.
12641                 return
12642
12643         if rval != os.EX_OK:
12644                 out = portage.output.EOutput()
12645                 out.eerror("PORTAGE_IONICE_COMMAND returned %d" % (rval,))
12646                 out.eerror("See the make.conf(5) man page for PORTAGE_IONICE_COMMAND usage instructions.")
12647
12648 def display_missing_pkg_set(root_config, set_name):
12649
12650         msg = []
12651         msg.append(("emerge: There are no sets to satisfy '%s'. " + \
12652                 "The following sets exist:") % \
12653                 colorize("INFORM", set_name))
12654         msg.append("")
12655
12656         for s in sorted(root_config.sets):
12657                 msg.append("    %s" % s)
12658         msg.append("")
12659
12660         writemsg_level("".join("%s\n" % l for l in msg),
12661                 level=logging.ERROR, noiselevel=-1)
12662
12663 def expand_set_arguments(myfiles, myaction, root_config):
12664         retval = os.EX_OK
12665         setconfig = root_config.setconfig
12666
12667         sets = setconfig.getSets()
12668
12669         # In order to know exactly which atoms/sets should be added to the
12670         # world file, the depgraph performs set expansion later. It will get
12671         # confused about where the atoms came from if it's not allowed to
12672         # expand them itself.
12673         do_not_expand = (None, )
12674         newargs = []
12675         for a in myfiles:
12676                 if a in ("system", "world"):
12677                         newargs.append(SETPREFIX+a)
12678                 else:
12679                         newargs.append(a)
12680         myfiles = newargs
12681         del newargs
12682         newargs = []
12683
12684         # separators for set arguments
12685         ARG_START = "{"
12686         ARG_END = "}"
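                      # e.g. (illustrative) an argument like "@some-set{key=value,flag}"
                      # results in setconfig.update("some-set", {"key": "value", "flag": "True"})
                      # below, and the argument itself is reduced to "@some-set".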
12687
12688         # WARNING: all operators must be of equal length
12689         IS_OPERATOR = "/@"
12690         DIFF_OPERATOR = "-@"
12691         UNION_OPERATOR = "+@"
12692
12693         for i in range(0, len(myfiles)):
12694                 if myfiles[i].startswith(SETPREFIX):
12695                         start = 0
12696                         end = 0
12697                         x = myfiles[i][len(SETPREFIX):]
12698                         newset = ""
12699                         while x:
12700                                 start = x.find(ARG_START)
12701                                 end = x.find(ARG_END)
12702                                 if start > 0 and start < end:
12703                                         namepart = x[:start]
12704                                         argpart = x[start+1:end]
12705
12706                                         # TODO: implement proper quoting
12707                                         args = argpart.split(",")
12708                                         options = {}
12709                                         for a in args:
12710                                                 if "=" in a:
12711                                                         k, v  = a.split("=", 1)
12712                                                         options[k] = v
12713                                                 else:
12714                                                         options[a] = "True"
12715                                         setconfig.update(namepart, options)
12716                                         newset += (x[:start-len(namepart)]+namepart)
12717                                         x = x[end+len(ARG_END):]
12718                                 else:
12719                                         newset += x
12720                                         x = ""
12721                         myfiles[i] = SETPREFIX+newset
12722
12723         sets = setconfig.getSets()
12724
12725         # display errors that occurred while loading the SetConfig instance
12726         for e in setconfig.errors:
12727                 print colorize("BAD", "Error during set creation: %s" % e)
12728
12729         # emerge relies on the existence of sets named "world" and "system"
12730         required_sets = ("world", "system")
12731         missing_sets = []
12732
12733         for s in required_sets:
12734                 if s not in sets:
12735                         missing_sets.append(s)
12736         if missing_sets:
12737                 if len(missing_sets) > 2:
12738                         missing_sets_str = ", ".join('"%s"' % s for s in missing_sets[:-1])
12739                         missing_sets_str += ', and "%s"' % missing_sets[-1]
12740                 elif len(missing_sets) == 2:
12741                         missing_sets_str = '"%s" and "%s"' % tuple(missing_sets)
12742                 else:
12743                         missing_sets_str = '"%s"' % missing_sets[-1]
12744                 msg = ["emerge: incomplete set configuration, " + \
12745                         "missing set(s): %s" % missing_sets_str]
12746                 if sets:
12747                         msg.append("        sets defined: %s" % ", ".join(sets))
12748                 msg.append("        This usually means that '%s'" % \
12749                         (os.path.join(portage.const.GLOBAL_CONFIG_PATH, "sets.conf"),))
12750                 msg.append("        is missing or corrupt.")
12751                 for line in msg:
12752                         writemsg_level(line + "\n", level=logging.ERROR, noiselevel=-1)
12753                 return (None, 1)
12754         unmerge_actions = ("unmerge", "prune", "clean", "depclean")
12755
12756         for a in myfiles:
12757                 if a.startswith(SETPREFIX):
12758                         # support simple set operations (intersection, difference and union)
12759                         # on the command line. Expressions are evaluated strictly left-to-right.
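                              # e.g. (illustrative) "@set1-@set2+@set3" evaluates to
                              # ((atoms of set1) - (atoms of set2)) | (atoms of set3).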
12760                         if IS_OPERATOR in a or DIFF_OPERATOR in a or UNION_OPERATOR in a:
12761                                 expression = a[len(SETPREFIX):]
12762                                 expr_sets = []
12763                                 expr_ops = []
12764                                 while IS_OPERATOR in expression or DIFF_OPERATOR in expression or UNION_OPERATOR in expression:
12765                                         is_pos = expression.rfind(IS_OPERATOR)
12766                                         diff_pos = expression.rfind(DIFF_OPERATOR)
12767                                         union_pos = expression.rfind(UNION_OPERATOR)
12768                                         op_pos = max(is_pos, diff_pos, union_pos)
12769                                         s1 = expression[:op_pos]
12770                                         s2 = expression[op_pos+len(IS_OPERATOR):]
12771                                         op = expression[op_pos:op_pos+len(IS_OPERATOR)]
12772                                         if s2 not in sets:
12773                                                 display_missing_pkg_set(root_config, s2)
12774                                                 return (None, 1)
12775                                         expr_sets.insert(0, s2)
12776                                         expr_ops.insert(0, op)
12777                                         expression = s1
12778                                 if expression not in sets:
12779                                         display_missing_pkg_set(root_config, expression)
12780                                         return (None, 1)
12781                                 expr_sets.insert(0, expression)
12782                                 result = set(setconfig.getSetAtoms(expression))
12783                                 for i in range(0, len(expr_ops)):
12784                                         s2 = setconfig.getSetAtoms(expr_sets[i+1])
12785                                         if expr_ops[i] == IS_OPERATOR:
12786                                                 result.intersection_update(s2)
12787                                         elif expr_ops[i] == DIFF_OPERATOR:
12788                                                 result.difference_update(s2)
12789                                         elif expr_ops[i] == UNION_OPERATOR:
12790                                                 result.update(s2)
12791                                         else:
12792                                                 raise NotImplementedError("unknown set operator %s" % expr_ops[i])
12793                                 newargs.extend(result)
12794                         else:
12795                                 s = a[len(SETPREFIX):]
12796                                 if s not in sets:
12797                                         display_missing_pkg_set(root_config, s)
12798                                         return (None, 1)
12799                                 setconfig.active.append(s)
12800                                 try:
12801                                         set_atoms = setconfig.getSetAtoms(s)
12802                                 except portage.exception.PackageSetNotFound, e:
12803                                         writemsg_level(("emerge: the given set '%s' " + \
12804                                                 "contains a non-existent set named '%s'.\n") % \
12805                                                 (s, e), level=logging.ERROR, noiselevel=-1)
12806                                         return (None, 1)
12807                                 if myaction in unmerge_actions and \
12808                                                 not sets[s].supportsOperation("unmerge"):
12809                                         sys.stderr.write("emerge: the given set '%s' does " % s + \
12810                                                 "not support unmerge operations\n")
12811                                         retval = 1
12812                                 elif not set_atoms:
12813                                         print "emerge: '%s' is an empty set" % s
12814                                 elif myaction not in do_not_expand:
12815                                         newargs.extend(set_atoms)
12816                                 else:
12817                                         newargs.append(SETPREFIX+s)
12818                                 for e in sets[s].errors:
12819                                         print e
12820                 else:
12821                         newargs.append(a)
12822         return (newargs, retval)
12823
12824 def repo_name_check(trees):
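              """Warn about repository trees that are missing a profiles/repo_name
              entry.  Returns True if any such tree was found."""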
12825         missing_repo_names = set()
12826         for root, root_trees in trees.iteritems():
12827                 if "porttree" in root_trees:
12828                         portdb = root_trees["porttree"].dbapi
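                              # Assume every configured tree is missing a repo_name, then
                              # discard the trees that actually declare one.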
12829                         missing_repo_names.update(portdb.porttrees)
12830                         repos = portdb.getRepositories()
12831                         for r in repos:
12832                                 missing_repo_names.discard(portdb.getRepositoryPath(r))
12833                         if portdb.porttree_root in missing_repo_names and \
12834                                 not os.path.exists(os.path.join(
12835                                 portdb.porttree_root, "profiles")):
12836                                 # This is normal if $PORTDIR happens to be empty,
12837                                 # so don't warn about it.
12838                                 missing_repo_names.remove(portdb.porttree_root)
12839
12840         if missing_repo_names:
12841                 msg = []
12842                 msg.append("WARNING: One or more repositories " + \
12843                         "have missing repo_name entries:")
12844                 msg.append("")
12845                 for p in missing_repo_names:
12846                         msg.append("\t%s/profiles/repo_name" % (p,))
12847                 msg.append("")
12848                 msg.extend(textwrap.wrap("NOTE: Each repo_name entry " + \
12849                         "should be a plain text file containing a unique " + \
12850                         "name for the repository on the first line.", 70))
12851                 writemsg_level("".join("%s\n" % l for l in msg),
12852                         level=logging.WARNING, noiselevel=-1)
12853
12854         return bool(missing_repo_names)
12855
12856 def repo_name_duplicate_check(trees):
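              """Warn about repository trees that were ignored because their
              profiles/repo_name duplicates that of another tree.  Returns True
              if any trees were ignored."""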
12857         ignored_repos = {}
12858         for root, root_trees in trees.iteritems():
12859                 if 'porttree' in root_trees:
12860                         portdb = root_trees['porttree'].dbapi
12861                         if portdb.mysettings.get('PORTAGE_REPO_DUPLICATE_WARN') != '0':
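                                      # Each _ignored_repos entry pairs a duplicated repo_name with
                                      # the paths that were ignored in favor of the tree returned by
                                      # getRepositoryPath(repo_name).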
12862                                 for repo_name, paths in portdb._ignored_repos:
12863                                         k = (root, repo_name, portdb.getRepositoryPath(repo_name))
12864                                         ignored_repos.setdefault(k, []).extend(paths)
12865
12866         if ignored_repos:
12867                 msg = []
12868                 msg.append('WARNING: One or more repositories ' + \
12869                         'have been ignored due to duplicate')
12870                 msg.append('  profiles/repo_name entries:')
12871                 msg.append('')
12872                 for k in sorted(ignored_repos):
12873                         msg.append('  %s overrides' % (k,))
12874                         for path in ignored_repos[k]:
12875                                 msg.append('    %s' % (path,))
12876                         msg.append('')
12877                 msg.extend('  ' + x for x in textwrap.wrap(
12878                         "All profiles/repo_name entries must be unique in order " + \
12879                         "to avoid having duplicates ignored. " + \
12880                         "Set PORTAGE_REPO_DUPLICATE_WARN=\"0\" in " + \
12881                         "/etc/make.conf if you would like to disable this warning."))
12882                 writemsg_level(''.join('%s\n' % l for l in msg),
12883                         level=logging.WARNING, noiselevel=-1)
12884
12885         return bool(ignored_repos)
12886
12887 def config_protect_check(trees):
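              """Warn if CONFIG_PROTECT is unset or empty for any configured root."""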
12888         for root, root_trees in trees.iteritems():
12889                 if not root_trees["root_config"].settings.get("CONFIG_PROTECT"):
12890                         msg = "!!! CONFIG_PROTECT is empty"
12891                         if root != "/":
12892                                 msg += " for '%s'" % root
12893                         writemsg_level(msg + "\n", level=logging.WARN, noiselevel=-1)
12894
12895 def ambiguous_package_name(arg, atoms, root_config, spinner, myopts):
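              """Tell the user that a short ebuild name matched multiple packages
              and list the fully-qualified alternatives (plain text with --quiet,
              otherwise via the search class output)."""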
12896
12897         if "--quiet" in myopts:
12898                 print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
12899                 print "!!! one of the following fully-qualified ebuild names instead:\n"
12900                 for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
12901                         print "    " + colorize("INFORM", cp)
12902                 return
12903
12904         s = search(root_config, spinner, "--searchdesc" in myopts,
12905                 "--quiet" not in myopts, "--usepkg" in myopts,
12906                 "--usepkgonly" in myopts)
12907         null_cp = portage.dep_getkey(insert_category_into_atom(
12908                 arg, "null"))
12909         cat, atom_pn = portage.catsplit(null_cp)
12910         s.searchkey = atom_pn
12911         for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
12912                 s.addCP(cp)
12913         s.output()
12914         print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
12915         print "!!! one of the above fully-qualified ebuild names instead.\n"
12916
12917 def profile_check(trees, myaction, myopts):
12918         if myaction in ("info", "sync"):
12919                 return os.EX_OK
12920         elif "--version" in myopts or "--help" in myopts:
12921                 return os.EX_OK
12922         for root, root_trees in trees.iteritems():
12923                 if root_trees["root_config"].settings.profiles:
12924                         continue
12925                 # generate some profile-related warning messages
12926                 validate_ebuild_environment(trees)
12927                 msg = "If you have just changed your profile configuration, you " + \
12928                         "should revert back to the previous configuration. Due to " + \
12929                         "your current profile being invalid, allowed actions are " + \
12930                         "limited to --help, --info, --sync, and --version."
12931                 writemsg_level("".join("!!! %s\n" % l for l in textwrap.wrap(msg, 70)),
12932                         level=logging.ERROR, noiselevel=-1)
12933                 return 1
12934         return os.EX_OK
12935
12936 def emerge_main():
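              """Parse command line options, load the emerge configuration, apply
              option implications and privilege checks, and dispatch the requested
              action.  Returns a shell-style exit code."""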
12937         global portage  # FIXME: unclear why this global declaration is still needed - genone
12938         portage._disable_legacy_globals()
12939         # Disable color until we're sure that it should be enabled (after
12940         # EMERGE_DEFAULT_OPTS has been parsed).
12941         portage.output.havecolor = 0
12942         # This first pass is just for options that need to be known as early as
12943         # possible, such as --config-root.  They will be parsed again later,
12944         # together with EMERGE_DEFAULT_OPTS (which may vary depending on the
12945         # value of --config-root).
12946         myaction, myopts, myfiles = parse_opts(sys.argv[1:], silent=True)
12947         if "--debug" in myopts:
12948                 os.environ["PORTAGE_DEBUG"] = "1"
12949         if "--config-root" in myopts:
12950                 os.environ["PORTAGE_CONFIGROOT"] = myopts["--config-root"]
12951         if "--root" in myopts:
12952                 os.environ["ROOT"] = myopts["--root"]
12953
12954         # Portage needs to ensure a sane umask for the files it creates.
12955         os.umask(022)
12956         settings, trees, mtimedb = load_emerge_config()
12957         portdb = trees[settings["ROOT"]]["porttree"].dbapi
12958         rval = profile_check(trees, myaction, myopts)
12959         if rval != os.EX_OK:
12960                 return rval
12961
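              # _global_updates() applies any pending package move/slotmove updates
              # (profiles/updates); if it reports changes, the on-disk databases may
              # have been rewritten, so reload the configuration from scratch.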
12962         if portage._global_updates(trees, mtimedb["updates"]):
12963                 mtimedb.commit()
12964                 # Reload the whole config from scratch.
12965                 settings, trees, mtimedb = load_emerge_config(trees=trees)
12966                 portdb = trees[settings["ROOT"]]["porttree"].dbapi
12967
12968         xterm_titles = "notitles" not in settings.features
12969
12970         tmpcmdline = []
12971         if "--ignore-default-opts" not in myopts:
12972                 tmpcmdline.extend(settings["EMERGE_DEFAULT_OPTS"].split())
12973         tmpcmdline.extend(sys.argv[1:])
12974         myaction, myopts, myfiles = parse_opts(tmpcmdline)
12975
12976         if "--digest" in myopts:
12977                 os.environ["FEATURES"] = os.environ.get("FEATURES","") + " digest"
12978                 # Reload the whole config from scratch so that the portdbapi internal
12979                 # config is updated with new FEATURES.
12980                 settings, trees, mtimedb = load_emerge_config(trees=trees)
12981                 portdb = trees[settings["ROOT"]]["porttree"].dbapi
12982
12983         for myroot in trees:
12984                 mysettings = trees[myroot]["vartree"].settings
12985                 mysettings.unlock()
12986                 adjust_config(myopts, mysettings)
12987                 if '--pretend' not in myopts and myaction in \
12988                         (None, 'clean', 'depclean', 'prune', 'unmerge'):
12989                         mysettings["PORTAGE_COUNTER_HASH"] = \
12990                                 trees[myroot]["vartree"].dbapi._counter_hash()
12991                         mysettings.backup_changes("PORTAGE_COUNTER_HASH")
12992                 mysettings.lock()
12993                 del myroot, mysettings
12994
12995         apply_priorities(settings)
12996
12997         spinner = stdout_spinner()
12998         if "candy" in settings.features:
12999                 spinner.update = spinner.update_scroll
13000
13001         if "--quiet" not in myopts:
13002                 portage.deprecated_profile_check(settings=settings)
13003                 repo_name_check(trees)
13004                 repo_name_duplicate_check(trees)
13005                 config_protect_check(trees)
13006
13007         for mytrees in trees.itervalues():
13008                 mydb = mytrees["porttree"].dbapi
13009                 # Freeze the portdbapi for performance (memoize all xmatch results).
13010                 mydb.freeze()
13011         del mytrees, mydb
13012
13013         if "moo" in myfiles:
13014                 print """
13015
13016   Larry loves Gentoo (""" + platform.system() + """)
13017
13018  _______________________
13019 < Have you mooed today? >
13020  -----------------------
13021         \   ^__^
13022          \  (oo)\_______
13023             (__)\       )\/\ 
13024                 ||----w |
13025                 ||     ||
13026
13027 """
13028
13029         for x in myfiles:
13030                 ext = os.path.splitext(x)[1]
13031                 if (ext == ".ebuild" or ext == ".tbz2") and os.path.exists(os.path.abspath(x)):
13032                         print colorize("BAD", "\n*** emerging by path is broken and may not always work!!!\n")
13033                         break
13034
13035         root_config = trees[settings["ROOT"]]["root_config"]
13036         if myaction == "list-sets":
13037                 sys.stdout.write("".join("%s\n" % s for s in sorted(root_config.sets)))
13038                 sys.stdout.flush()
13039                 return os.EX_OK
13040
13041         # only expand sets for actions taking package arguments
13042         oldargs = myfiles[:]
13043         if myaction in ("clean", "config", "depclean", "info", "prune", "unmerge", None):
13044                 myfiles, retval = expand_set_arguments(myfiles, myaction, root_config)
13045                 if retval != os.EX_OK:
13046                         return retval
13047
13048                 # Need to handle empty sets specially, otherwise emerge will react 
13049                 # with the help message for empty argument lists
13050                 if oldargs and not myfiles:
13051                         print "emerge: no targets left after set expansion"
13052                         return 0
13053
13054         if ("--tree" in myopts) and ("--columns" in myopts):
13055                 print "emerge: can't specify both of \"--tree\" and \"--columns\"."
13056                 return 1
13057
13058         if ("--quiet" in myopts):
13059                 spinner.update = spinner.update_quiet
13060                 portage.util.noiselimit = -1
13061
13062         # Always create packages if FEATURES=buildpkg
13063         # Imply --buildpkg if --buildpkgonly
13064         if ("buildpkg" in settings.features) or ("--buildpkgonly" in myopts):
13065                 if "--buildpkg" not in myopts:
13066                         myopts["--buildpkg"] = True
13067
13068         # Always try to fetch binary packages if FEATURES=getbinpkg
13069         if ("getbinpkg" in settings.features):
13070                 myopts["--getbinpkg"] = True
13071
13072         if "--buildpkgonly" in myopts:
13073                 # --buildpkgonly will not merge anything, so
13074                 # it cancels all binary package options.
13075                 for opt in ("--getbinpkg", "--getbinpkgonly",
13076                         "--usepkg", "--usepkgonly"):
13077                         myopts.pop(opt, None)
13078
13079         if "--fetch-all-uri" in myopts:
13080                 myopts["--fetchonly"] = True
13081
13082         if "--skipfirst" in myopts and "--resume" not in myopts:
13083                 myopts["--resume"] = True
13084
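              # Binary package option implications: --getbinpkgonly implies
              # --usepkgonly and --getbinpkg, and --getbinpkg or --usepkgonly in
              # turn imply --usepkg.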
13085         if ("--getbinpkgonly" in myopts) and not ("--usepkgonly" in myopts):
13086                 myopts["--usepkgonly"] = True
13087
13088         if ("--getbinpkgonly" in myopts) and not ("--getbinpkg" in myopts):
13089                 myopts["--getbinpkg"] = True
13090
13091         if ("--getbinpkg" in myopts) and not ("--usepkg" in myopts):
13092                 myopts["--usepkg"] = True
13093
13094         # Also allow -K to apply --usepkg/-k
13095         if ("--usepkgonly" in myopts) and not ("--usepkg" in myopts):
13096                 myopts["--usepkg"] = True
13097
13098         # Allow -p to remove --ask
13099         if "--pretend" in myopts:
13100                 myopts.pop("--ask", None)
13101
13102         # forbid --ask when not in a terminal
13103         # note: this breaks `emerge --ask | tee logfile`, but that doesn't work anyway.
13104         if ("--ask" in myopts) and (not sys.stdin.isatty()):
13105                 portage.writemsg("!!! \"--ask\" should only be used in a terminal. Exiting.\n",
13106                         noiselevel=-1)
13107                 return 1
13108
13109         if settings.get("PORTAGE_DEBUG", "") == "1":
13110                 spinner.update = spinner.update_quiet
13111                 portage.debug=1
13112                 if "python-trace" in settings.features:
13113                         import portage.debug
13114                         portage.debug.set_trace(True)
13115
13116         if not ("--quiet" in myopts):
13117                 if not sys.stdout.isatty() or ("--nospinner" in myopts):
13118                         spinner.update = spinner.update_basic
13119
13120         if myaction == 'version':
13121                 print getportageversion(settings["PORTDIR"], settings["ROOT"],
13122                         settings.profile_path, settings["CHOST"],
13123                         trees[settings["ROOT"]]["vartree"].dbapi)
13124                 return 0
13125         elif "--help" in myopts:
13126                 _emerge.help.help(myaction, myopts, portage.output.havecolor)
13127                 return 0
13128
13129         if "--debug" in myopts:
13130                 print "myaction", myaction
13131                 print "myopts", myopts
13132
13133         if not myaction and not myfiles and "--resume" not in myopts:
13134                 _emerge.help.help(myaction, myopts, portage.output.havecolor)
13135                 return 1
13136
13137         pretend = "--pretend" in myopts
13138         fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
13139         buildpkgonly = "--buildpkgonly" in myopts
13140
13141         # Check that the user has sufficient privileges (root or portage group) for the requested action.
13142         if portage.secpass < 2:
13143                 # We've already allowed "--version" and "--help" above.
13144                 if "--pretend" not in myopts and myaction not in ("search","info"):
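                              # Superuser access is required for unmerge-style actions and for
                              # anything that is not fetch-only, --buildpkgonly with portage
                              # group access, metadata/regen, or a sync into a writable PORTDIR.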
13145                         need_superuser = myaction in ('clean', 'depclean', 'deselect',
13146                                 'prune', 'unmerge') or not \
13147                                 (fetchonly or \
13148                                 (buildpkgonly and secpass >= 1) or \
13149                                 myaction in ("metadata", "regen") or \
13150                                 (myaction == "sync" and os.access(settings["PORTDIR"], os.W_OK)))
13151                         if portage.secpass < 1 or \
13152                                 need_superuser:
13153                                 if need_superuser:
13154                                         access_desc = "superuser"
13155                                 else:
13156                                         access_desc = "portage group"
13157                                 # Always show portage_group_warning() when only portage group
13158                                 # access is required but the user is not in the portage group.
13159                                 from portage.data import portage_group_warning
13160                                 if "--ask" in myopts:
13161                                         myopts["--pretend"] = True
13162                                         del myopts["--ask"]
13163                                         print ("%s access is required... " + \
13164                                                 "adding --pretend to options\n") % access_desc
13165                                         if portage.secpass < 1 and not need_superuser:
13166                                                 portage_group_warning()
13167                                 else:
13168                                         sys.stderr.write(("emerge: %s access is required\n") \
13169                                                 % access_desc)
13170                                         if portage.secpass < 1 and not need_superuser:
13171                                                 portage_group_warning()
13172                                         return 1
13173
13174         disable_emergelog = False
13175         for x in ("--pretend", "--fetchonly", "--fetch-all-uri"):
13176                 if x in myopts:
13177                         disable_emergelog = True
13178                         break
13179         if myaction in ("search", "info"):
13180                 disable_emergelog = True
13181         if disable_emergelog:
13182                 """ Disable emergelog for everything except build or unmerge
13183                 operations.  This helps minimize parallel emerge.log entries that can
13184                 confuse log parsers.  We especially want it disabled during
13185                 parallel-fetch, which uses --resume --fetchonly."""
13186                 global emergelog
13187                 def emergelog(*pargs, **kargs):
13188                         pass
13189
13190         else:
13191                 if 'EMERGE_LOG_DIR' in settings:
13192                         try:
13193                                 # At least the parent needs to exist for the lock file.
13194                                 portage.util.ensure_dirs(settings['EMERGE_LOG_DIR'])
13195                         except portage.exception.PortageException, e:
13196                                 writemsg_level("!!! Error creating directory for " + \
13197                                         "EMERGE_LOG_DIR='%s':\n!!! %s\n" % \
13198                                         (settings['EMERGE_LOG_DIR'], e),
13199                                         noiselevel=-1, level=logging.ERROR)
13200                         else:
13201                                 global _emerge_log_dir
13202                                 _emerge_log_dir = settings['EMERGE_LOG_DIR']
13203
13204         if "--pretend" not in myopts:
13205                 emergelog(xterm_titles, "Started emerge on: "+\
13206                         time.strftime("%b %d, %Y %H:%M:%S", time.localtime()))
13207                 myelogstr=""
13208                 if myopts:
13209                         myelogstr=" ".join(myopts)
13210                 if myaction:
13211                         myelogstr+=" "+myaction
13212                 if myfiles:
13213                         myelogstr += " " + " ".join(oldargs)
13214                 emergelog(xterm_titles, " *** emerge " + myelogstr)
13215         del oldargs
13216
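              # Translate SIGINT/SIGTERM into a normal exit (status 100 + signum),
              # ignoring any further signals while shutting down.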
13217         def emergeexitsig(signum, frame):
13218                 signal.signal(signal.SIGINT, signal.SIG_IGN)
13219                 signal.signal(signal.SIGTERM, signal.SIG_IGN)
13220                 portage.util.writemsg("\n\nExiting on signal %(signal)s\n" % {"signal":signum})
13221                 sys.exit(100+signum)
13222         signal.signal(signal.SIGINT, emergeexitsig)
13223         signal.signal(signal.SIGTERM, emergeexitsig)
13224
13225         def emergeexit():
13226                 """This gets our final log message in before we quit."""
13227                 if "--pretend" not in myopts:
13228                         emergelog(xterm_titles, " *** terminating.")
13229                 if "notitles" not in settings.features:
13230                         xtermTitleReset()
13231         portage.atexit_register(emergeexit)
13232
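              # These actions have no meaningful --pretend mode, so reject the
              # combination up front.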
13233         if myaction in ("config", "metadata", "regen", "sync"):
13234                 if "--pretend" in myopts:
13235                         sys.stderr.write(("emerge: The '%s' action does " + \
13236                                 "not support '--pretend'.\n") % myaction)
13237                         return 1
13238
13239         if "sync" == myaction:
13240                 return action_sync(settings, trees, mtimedb, myopts, myaction)
13241         elif "metadata" == myaction:
13242                 action_metadata(settings, portdb, myopts)
13243         elif myaction=="regen":
13244                 validate_ebuild_environment(trees)
13245                 return action_regen(settings, portdb, myopts.get("--jobs"),
13246                         myopts.get("--load-average"))
13247         # CONFIG action
13248         elif "config"==myaction:
13249                 validate_ebuild_environment(trees)
13250                 action_config(settings, trees, myopts, myfiles)
13251
13252         # SEARCH action
13253         elif "search"==myaction:
13254                 validate_ebuild_environment(trees)
13255                 action_search(trees[settings["ROOT"]]["root_config"],
13256                         myopts, myfiles, spinner)
13257
13258         elif myaction in ('clean', 'depclean', 'deselect', 'prune', 'unmerge'):
13259                 validate_ebuild_environment(trees)
13260                 rval = action_uninstall(settings, trees, mtimedb["ldpath"],
13261                         myopts, myaction, myfiles, spinner)
13262                 if not (myaction == 'deselect' or buildpkgonly or fetchonly or pretend):
13263                         post_emerge(root_config, myopts, mtimedb, rval)
13264                 return rval
13265
13266         elif myaction == 'info':
13267
13268                 # Ensure atoms are valid before calling action_info().
13269                 vardb = trees[settings["ROOT"]]["vartree"].dbapi
13270                 valid_atoms = []
13271                 for x in myfiles:
13272                         if is_valid_package_atom(x):
13273                                 try:
13274                                         valid_atoms.append(
13275                                                 portage.dep_expand(x, mydb=vardb, settings=settings))
13276                                 except portage.exception.AmbiguousPackageName, e:
13277                                         msg = "The short ebuild name \"" + x + \
13278                                                 "\" is ambiguous.  Please specify " + \
13279                                                 "one of the following " + \
13280                                                 "fully-qualified ebuild names instead:"
13281                                         for line in textwrap.wrap(msg, 70):
13282                                                 writemsg_level("!!! %s\n" % (line,),
13283                                                         level=logging.ERROR, noiselevel=-1)
13284                                         for i in e[0]:
13285                                                 writemsg_level("    %s\n" % colorize("INFORM", i),
13286                                                         level=logging.ERROR, noiselevel=-1)
13287                                         writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
13288                                         return 1
13289                                 continue
13290                         msg = []
13291                         msg.append("'%s' is not a valid package atom." % (x,))
13292                         msg.append("Please check ebuild(5) for full details.")
13293                         writemsg_level("".join("!!! %s\n" % line for line in msg),
13294                                 level=logging.ERROR, noiselevel=-1)
13295                         return 1
13296
13297                 return action_info(settings, trees, myopts, valid_atoms)
13298
13299         # "update", "system", or just process files:
13300         else:
13301                 validate_ebuild_environment(trees)
13302
13303                 for x in myfiles:
13304                         if x.startswith(SETPREFIX) or \
13305                                 is_valid_package_atom(x):
13306                                 continue
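                              # Filesystem paths (absolute paths or names of existing files) are
                              # also accepted here; path arguments are handled further along.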
13307                         if x[:1] == os.sep:
13308                                 continue
13309                         try:
13310                                 os.lstat(x)
13311                                 continue
13312                         except OSError:
13313                                 pass
13314                         msg = []
13315                         msg.append("'%s' is not a valid package atom." % (x,))
13316                         msg.append("Please check ebuild(5) for full details.")
13317                         writemsg_level("".join("!!! %s\n" % line for line in msg),
13318                                 level=logging.ERROR, noiselevel=-1)
13319                         return 1
13320
13321                 if "--pretend" not in myopts:
13322                         display_news_notification(root_config, myopts)
13323                 retval = action_build(settings, trees, mtimedb,
13324                         myopts, myaction, myfiles, spinner)
13325                 root_config = trees[settings["ROOT"]]["root_config"]
13326                 post_emerge(root_config, myopts, mtimedb, retval)
13327
13328                 return retval