Tweak _eclasses_ logic inside action_metadata().
[portage.git] / pym / _emerge / __init__.py
1 #!/usr/bin/python -O
2 # Copyright 1999-2006 Gentoo Foundation
3 # Distributed under the terms of the GNU General Public License v2
4 # $Id$
5
6 import array
7 import codecs
8 from collections import deque
9 import fcntl
10 import formatter
11 import logging
12 import pwd
13 import select
14 import shlex
15 import shutil
16 import signal
17 import sys
18 import textwrap
19 import urlparse
20 import weakref
21 import gc
22 import os, stat
23 import platform
24
25 try:
26         import portage
27 except ImportError:
28         from os import path as osp
29         sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
30         import portage
31
32 from portage import digraph
33 from portage.const import NEWS_LIB_PATH
34
35 import _emerge.help
36 import portage.xpak, commands, errno, re, socket, time
37 from portage.output import blue, bold, colorize, darkblue, darkgreen, darkred, green, \
38         nc_len, red, teal, turquoise, xtermTitle, \
39         xtermTitleReset, yellow
40 from portage.output import create_color_func
41 good = create_color_func("GOOD")
42 bad = create_color_func("BAD")
43 # white looks bad on terminals with white background
44 from portage.output import bold as white
45
46 import portage.elog
47 import portage.dep
48 portage.dep._dep_check_strict = True
49 import portage.util
50 import portage.locks
51 import portage.exception
52 from portage.cache.cache_errors import CacheError
53 from portage.data import secpass
54 from portage.elog.messages import eerror
55 from portage.util import normalize_path as normpath
56 from portage.util import cmp_sort_key, writemsg, writemsg_level
57 from portage.sets import load_default_config, SETPREFIX
58 from portage.sets.base import InternalPackageSet
59
60 from itertools import chain, izip
61
62 try:
63         import cPickle as pickle
64 except ImportError:
65         import pickle
66
67 try:
68         from cStringIO import StringIO
69 except ImportError:
70         from StringIO import StringIO
71
72 class stdout_spinner(object):
73         scroll_msgs = [
74                 "Gentoo Rocks ("+platform.system()+")",
75                 "Thank you for using Gentoo. :)",
76                 "Are you actually trying to read this?",
77                 "How many times have you stared at this?",
78                 "We are generating the cache right now",
79                 "You are paying too much attention.",
80                 "A theory is better than its explanation.",
81                 "Phasers locked on target, Captain.",
82                 "Thrashing is just virtual crashing.",
83                 "To be is to program.",
84                 "Real Users hate Real Programmers.",
85                 "When all else fails, read the instructions.",
86                 "Functionality breeds Contempt.",
87                 "The future lies ahead.",
88                 "3.1415926535897932384626433832795028841971694",
89                 "Sometimes insanity is the only alternative.",
90                 "Inaccuracy saves a world of explanation.",
91         ]
92
93         twirl_sequence = "/-\\|/-\\|/-\\|/-\\|\\-/|\\-/|\\-/|\\-/|"
94
95         def __init__(self):
96                 self.spinpos = 0
97                 self.update = self.update_twirl
98                 self.scroll_sequence = self.scroll_msgs[
99                         int(time.time() * 100) % len(self.scroll_msgs)]
100                 self.last_update = 0
101                 self.min_display_latency = 0.05
102
103         def _return_early(self):
104                 """
105                 Flushing ouput to the tty too frequently wastes cpu time. Therefore,
106                 each update* method should return without doing any output when this
107                 method returns True.
108                 """
109                 cur_time = time.time()
110                 if cur_time - self.last_update < self.min_display_latency:
111                         return True
112                 self.last_update = cur_time
113                 return False
114
115         def update_basic(self):
116                 self.spinpos = (self.spinpos + 1) % 500
117                 if self._return_early():
118                         return
119                 if (self.spinpos % 100) == 0:
120                         if self.spinpos == 0:
121                                 sys.stdout.write(". ")
122                         else:
123                                 sys.stdout.write(".")
124                 sys.stdout.flush()
125
126         def update_scroll(self):
127                 if self._return_early():
128                         return
129                 if(self.spinpos >= len(self.scroll_sequence)):
130                         sys.stdout.write(darkgreen(" \b\b\b" + self.scroll_sequence[
131                                 len(self.scroll_sequence) - 1 - (self.spinpos % len(self.scroll_sequence))]))
132                 else:
133                         sys.stdout.write(green("\b " + self.scroll_sequence[self.spinpos]))
134                 sys.stdout.flush()
135                 self.spinpos = (self.spinpos + 1) % (2 * len(self.scroll_sequence))
136
137         def update_twirl(self):
138                 self.spinpos = (self.spinpos + 1) % len(self.twirl_sequence)
139                 if self._return_early():
140                         return
141                 sys.stdout.write("\b\b " + self.twirl_sequence[self.spinpos])
142                 sys.stdout.flush()
143
144         def update_quiet(self):
145                 return
146
147 def userquery(prompt, responses=None, colours=None):
148         """Displays a prompt and a set of responses, then waits for a response
149         which is checked against the responses and the first to match is
150         returned.  An empty response will match the first value in responses.  The
151         input buffer is *not* cleared prior to the prompt!
152
153         prompt: a String.
154         responses: a List of Strings.
155         colours: a List of Functions taking and returning a String, used to
156         process the responses for display. Typically these will be functions
157         like red() but could be e.g. lambda x: "DisplayString".
158         If responses is omitted, defaults to ["Yes", "No"], [green, red].
159         If only colours is omitted, defaults to [bold, ...].
160
161         Returns a member of the List responses. (If called without optional
162         arguments, returns "Yes" or "No".)
163         KeyboardInterrupt is converted to SystemExit to avoid tracebacks being
164         printed."""
165         if responses is None:
166                 responses = ["Yes", "No"]
167                 colours = [
168                         create_color_func("PROMPT_CHOICE_DEFAULT"),
169                         create_color_func("PROMPT_CHOICE_OTHER")
170                 ]
171         elif colours is None:
172                 colours=[bold]
173         colours=(colours*len(responses))[:len(responses)]
174         print bold(prompt),
175         try:
176                 while True:
177                         response=raw_input("["+"/".join([colours[i](responses[i]) for i in range(len(responses))])+"] ")
178                         for key in responses:
179                                 # An empty response will match the first value in responses.
180                                 if response.upper()==key[:len(response)].upper():
181                                         return key
182                         print "Sorry, response '%s' not understood." % response,
183         except (EOFError, KeyboardInterrupt):
184                 print "Interrupted."
185                 sys.exit(1)
186
187 actions = frozenset([
188 "clean", "config", "depclean",
189 "info", "list-sets", "metadata",
190 "prune", "regen",  "search",
191 "sync",  "unmerge", "version",
192 ])
193 options=[
194 "--ask",          "--alphabetical",
195 "--buildpkg",     "--buildpkgonly",
196 "--changelog",    "--columns",
197 "--complete-graph",
198 "--debug",        "--deep",
199 "--digest",
200 "--emptytree",
201 "--fetchonly",    "--fetch-all-uri",
202 "--getbinpkg",    "--getbinpkgonly",
203 "--help",         "--ignore-default-opts",
204 "--keep-going",
205 "--noconfmem",
206 "--newuse",
207 "--nodeps",       "--noreplace",
208 "--nospinner",    "--oneshot",
209 "--onlydeps",     "--pretend",
210 "--quiet",        "--resume",
211 "--searchdesc",   "--selective",
212 "--skipfirst",
213 "--tree",
214 "--update",
215 "--usepkg",       "--usepkgonly",
216 "--verbose",
217 ]
218
219 shortmapping={
220 "1":"--oneshot",
221 "a":"--ask",
222 "b":"--buildpkg",  "B":"--buildpkgonly",
223 "c":"--clean",     "C":"--unmerge",
224 "d":"--debug",     "D":"--deep",
225 "e":"--emptytree",
226 "f":"--fetchonly", "F":"--fetch-all-uri",
227 "g":"--getbinpkg", "G":"--getbinpkgonly",
228 "h":"--help",
229 "k":"--usepkg",    "K":"--usepkgonly",
230 "l":"--changelog",
231 "n":"--noreplace", "N":"--newuse",
232 "o":"--onlydeps",  "O":"--nodeps",
233 "p":"--pretend",   "P":"--prune",
234 "q":"--quiet",
235 "s":"--search",    "S":"--searchdesc",
236 "t":"--tree",
237 "u":"--update",
238 "v":"--verbose",   "V":"--version"
239 }
240
241 def emergelog(xterm_titles, mystr, short_msg=None):
242         if xterm_titles and short_msg:
243                 if "HOSTNAME" in os.environ:
244                         short_msg = os.environ["HOSTNAME"]+": "+short_msg
245                 xtermTitle(short_msg)
246         try:
247                 file_path = "/var/log/emerge.log"
248                 mylogfile = open(file_path, "a")
249                 portage.util.apply_secpass_permissions(file_path,
250                         uid=portage.portage_uid, gid=portage.portage_gid,
251                         mode=0660)
252                 mylock = None
253                 try:
254                         mylock = portage.locks.lockfile(mylogfile)
255                         # seek because we may have gotten held up by the lock.
256                         # if so, we may not be positioned at the end of the file.
257                         mylogfile.seek(0, 2)
258                         mylogfile.write(str(time.time())[:10]+": "+mystr+"\n")
259                         mylogfile.flush()
260                 finally:
261                         if mylock:
262                                 portage.locks.unlockfile(mylock)
263                         mylogfile.close()
264         except (IOError,OSError,portage.exception.PortageException), e:
265                 if secpass >= 1:
266                         print >> sys.stderr, "emergelog():",e
267
268 def countdown(secs=5, doing="Starting"):
269         if secs:
270                 print ">>> Waiting",secs,"seconds before starting..."
271                 print ">>> (Control-C to abort)...\n"+doing+" in: ",
272                 ticks=range(secs)
273                 ticks.reverse()
274                 for sec in ticks:
275                         sys.stdout.write(colorize("UNMERGE_WARN", str(sec+1)+" "))
276                         sys.stdout.flush()
277                         time.sleep(1)
278                 print
279
280 # formats a size given in bytes nicely
281 def format_size(mysize):
282         if isinstance(mysize, basestring):
283                 return mysize
284         if 0 != mysize % 1024:
285                 # Always round up to the next kB so that it doesn't show 0 kB when
286                 # some small file still needs to be fetched.
287                 mysize += 1024 - mysize % 1024
288         mystr=str(mysize/1024)
289         mycount=len(mystr)
290         while (mycount > 3):
291                 mycount-=3
292                 mystr=mystr[:mycount]+","+mystr[mycount:]
293         return mystr+" kB"
294
295
296 def getgccversion(chost):
297         """
298         rtype: C{str}
299         return:  the current in-use gcc version
300         """
301
302         gcc_ver_command = 'gcc -dumpversion'
303         gcc_ver_prefix = 'gcc-'
304
305         gcc_not_found_error = red(
306         "!!! No gcc found. You probably need to 'source /etc/profile'\n" +
307         "!!! to update the environment of this terminal and possibly\n" +
308         "!!! other terminals also.\n"
309         )
310
311         mystatus, myoutput = commands.getstatusoutput("gcc-config -c")
312         if mystatus == os.EX_OK and myoutput.startswith(chost + "-"):
313                 return myoutput.replace(chost + "-", gcc_ver_prefix, 1)
314
315         mystatus, myoutput = commands.getstatusoutput(
316                 chost + "-" + gcc_ver_command)
317         if mystatus == os.EX_OK:
318                 return gcc_ver_prefix + myoutput
319
320         mystatus, myoutput = commands.getstatusoutput(gcc_ver_command)
321         if mystatus == os.EX_OK:
322                 return gcc_ver_prefix + myoutput
323
324         portage.writemsg(gcc_not_found_error, noiselevel=-1)
325         return "[unavailable]"
326
327 def getportageversion(portdir, target_root, profile, chost, vardb):
328         profilever = "unavailable"
329         if profile:
330                 realpath = os.path.realpath(profile)
331                 basepath   = os.path.realpath(os.path.join(portdir, "profiles"))
332                 if realpath.startswith(basepath):
333                         profilever = realpath[1 + len(basepath):]
334                 else:
335                         try:
336                                 profilever = "!" + os.readlink(profile)
337                         except (OSError):
338                                 pass
339                 del realpath, basepath
340
341         libcver=[]
342         libclist  = vardb.match("virtual/libc")
343         libclist += vardb.match("virtual/glibc")
344         libclist  = portage.util.unique_array(libclist)
345         for x in libclist:
346                 xs=portage.catpkgsplit(x)
347                 if libcver:
348                         libcver+=","+"-".join(xs[1:])
349                 else:
350                         libcver="-".join(xs[1:])
351         if libcver==[]:
352                 libcver="unavailable"
353
354         gccver = getgccversion(chost)
355         unameout=platform.release()+" "+platform.machine()
356
357         return "Portage " + portage.VERSION +" ("+profilever+", "+gccver+", "+libcver+", "+unameout+")"
358
359 def create_depgraph_params(myopts, myaction):
360         #configure emerge engine parameters
361         #
362         # self:      include _this_ package regardless of if it is merged.
363         # selective: exclude the package if it is merged
364         # recurse:   go into the dependencies
365         # deep:      go into the dependencies of already merged packages
366         # empty:     pretend nothing is merged
367         # complete:  completely account for all known dependencies
368         # remove:    build graph for use in removing packages
369         myparams = set(["recurse"])
370
371         if myaction == "remove":
372                 myparams.add("remove")
373                 myparams.add("complete")
374                 return myparams
375
376         if "--update" in myopts or \
377                 "--newuse" in myopts or \
378                 "--reinstall" in myopts or \
379                 "--noreplace" in myopts:
380                 myparams.add("selective")
381         if "--emptytree" in myopts:
382                 myparams.add("empty")
383                 myparams.discard("selective")
384         if "--nodeps" in myopts:
385                 myparams.discard("recurse")
386         if "--deep" in myopts:
387                 myparams.add("deep")
388         if "--complete-graph" in myopts:
389                 myparams.add("complete")
390         return myparams
391
392 # search functionality
393 class search(object):
394
395         #
396         # class constants
397         #
398         VERSION_SHORT=1
399         VERSION_RELEASE=2
400
401         #
402         # public interface
403         #
404         def __init__(self, root_config, spinner, searchdesc,
405                 verbose, usepkg, usepkgonly):
406                 """Searches the available and installed packages for the supplied search key.
407                 The list of available and installed packages is created at object instantiation.
408                 This makes successive searches faster."""
409                 self.settings = root_config.settings
410                 self.vartree = root_config.trees["vartree"]
411                 self.spinner = spinner
412                 self.verbose = verbose
413                 self.searchdesc = searchdesc
414                 self.root_config = root_config
415                 self.setconfig = root_config.setconfig
416                 self.matches = {"pkg" : []}
417                 self.mlen = 0
418
419                 def fake_portdb():
420                         pass
421                 self.portdb = fake_portdb
422                 for attrib in ("aux_get", "cp_all",
423                         "xmatch", "findname", "getFetchMap"):
424                         setattr(fake_portdb, attrib, getattr(self, "_"+attrib))
425
426                 self._dbs = []
427
428                 portdb = root_config.trees["porttree"].dbapi
429                 bindb = root_config.trees["bintree"].dbapi
430                 vardb = root_config.trees["vartree"].dbapi
431
432                 if not usepkgonly and portdb._have_root_eclass_dir:
433                         self._dbs.append(portdb)
434
435                 if (usepkg or usepkgonly) and bindb.cp_all():
436                         self._dbs.append(bindb)
437
438                 self._dbs.append(vardb)
439                 self._portdb = portdb
440
441         def _cp_all(self):
442                 cp_all = set()
443                 for db in self._dbs:
444                         cp_all.update(db.cp_all())
445                 return list(sorted(cp_all))
446
447         def _aux_get(self, *args, **kwargs):
448                 for db in self._dbs:
449                         try:
450                                 return db.aux_get(*args, **kwargs)
451                         except KeyError:
452                                 pass
453                 raise
454
455         def _findname(self, *args, **kwargs):
456                 for db in self._dbs:
457                         if db is not self._portdb:
458                                 # We don't want findname to return anything
459                                 # unless it's an ebuild in a portage tree.
460                                 # Otherwise, it's already built and we don't
461                                 # care about it.
462                                 continue
463                         func = getattr(db, "findname", None)
464                         if func:
465                                 value = func(*args, **kwargs)
466                                 if value:
467                                         return value
468                 return None
469
470         def _getFetchMap(self, *args, **kwargs):
471                 for db in self._dbs:
472                         func = getattr(db, "getFetchMap", None)
473                         if func:
474                                 value = func(*args, **kwargs)
475                                 if value:
476                                         return value
477                 return {}
478
479         def _visible(self, db, cpv, metadata):
480                 installed = db is self.vartree.dbapi
481                 built = installed or db is not self._portdb
482                 pkg_type = "ebuild"
483                 if installed:
484                         pkg_type = "installed"
485                 elif built:
486                         pkg_type = "binary"
487                 return visible(self.settings,
488                         Package(type_name=pkg_type, root_config=self.root_config,
489                         cpv=cpv, built=built, installed=installed, metadata=metadata))
490
491         def _xmatch(self, level, atom):
492                 """
493                 This method does not expand old-style virtuals because it
494                 is restricted to returning matches for a single ${CATEGORY}/${PN}
495                 and old-style virual matches unreliable for that when querying
496                 multiple package databases. If necessary, old-style virtuals
497                 can be performed on atoms prior to calling this method.
498                 """
499                 cp = portage.dep_getkey(atom)
500                 if level == "match-all":
501                         matches = set()
502                         for db in self._dbs:
503                                 if hasattr(db, "xmatch"):
504                                         matches.update(db.xmatch(level, atom))
505                                 else:
506                                         matches.update(db.match(atom))
507                         result = list(x for x in matches if portage.cpv_getkey(x) == cp)
508                         db._cpv_sort_ascending(result)
509                 elif level == "match-visible":
510                         matches = set()
511                         for db in self._dbs:
512                                 if hasattr(db, "xmatch"):
513                                         matches.update(db.xmatch(level, atom))
514                                 else:
515                                         db_keys = list(db._aux_cache_keys)
516                                         for cpv in db.match(atom):
517                                                 metadata = izip(db_keys,
518                                                         db.aux_get(cpv, db_keys))
519                                                 if not self._visible(db, cpv, metadata):
520                                                         continue
521                                                 matches.add(cpv)
522                         result = list(x for x in matches if portage.cpv_getkey(x) == cp)
523                         db._cpv_sort_ascending(result)
524                 elif level == "bestmatch-visible":
525                         result = None
526                         for db in self._dbs:
527                                 if hasattr(db, "xmatch"):
528                                         cpv = db.xmatch("bestmatch-visible", atom)
529                                         if not cpv or portage.cpv_getkey(cpv) != cp:
530                                                 continue
531                                         if not result or cpv == portage.best([cpv, result]):
532                                                 result = cpv
533                                 else:
534                                         db_keys = Package.metadata_keys
535                                         # break out of this loop with highest visible
536                                         # match, checked in descending order
537                                         for cpv in reversed(db.match(atom)):
538                                                 if portage.cpv_getkey(cpv) != cp:
539                                                         continue
540                                                 metadata = izip(db_keys,
541                                                         db.aux_get(cpv, db_keys))
542                                                 if not self._visible(db, cpv, metadata):
543                                                         continue
544                                                 if not result or cpv == portage.best([cpv, result]):
545                                                         result = cpv
546                                                 break
547                 else:
548                         raise NotImplementedError(level)
549                 return result
550
551         def execute(self,searchkey):
552                 """Performs the search for the supplied search key"""
553                 match_category = 0
554                 self.searchkey=searchkey
555                 self.packagematches = []
556                 if self.searchdesc:
557                         self.searchdesc=1
558                         self.matches = {"pkg":[], "desc":[], "set":[]}
559                 else:
560                         self.searchdesc=0
561                         self.matches = {"pkg":[], "set":[]}
562                 print "Searching...   ",
563
564                 regexsearch = False
565                 if self.searchkey.startswith('%'):
566                         regexsearch = True
567                         self.searchkey = self.searchkey[1:]
568                 if self.searchkey.startswith('@'):
569                         match_category = 1
570                         self.searchkey = self.searchkey[1:]
571                 if regexsearch:
572                         self.searchre=re.compile(self.searchkey,re.I)
573                 else:
574                         self.searchre=re.compile(re.escape(self.searchkey), re.I)
575                 for package in self.portdb.cp_all():
576                         self.spinner.update()
577
578                         if match_category:
579                                 match_string  = package[:]
580                         else:
581                                 match_string  = package.split("/")[-1]
582
583                         masked=0
584                         if self.searchre.search(match_string):
585                                 if not self.portdb.xmatch("match-visible", package):
586                                         masked=1
587                                 self.matches["pkg"].append([package,masked])
588                         elif self.searchdesc: # DESCRIPTION searching
589                                 full_package = self.portdb.xmatch("bestmatch-visible", package)
590                                 if not full_package:
591                                         #no match found; we don't want to query description
592                                         full_package = portage.best(
593                                                 self.portdb.xmatch("match-all", package))
594                                         if not full_package:
595                                                 continue
596                                         else:
597                                                 masked=1
598                                 try:
599                                         full_desc = self.portdb.aux_get(
600                                                 full_package, ["DESCRIPTION"])[0]
601                                 except KeyError:
602                                         print "emerge: search: aux_get() failed, skipping"
603                                         continue
604                                 if self.searchre.search(full_desc):
605                                         self.matches["desc"].append([full_package,masked])
606
607                 self.sdict = self.setconfig.getSets()
608                 for setname in self.sdict:
609                         self.spinner.update()
610                         if match_category:
611                                 match_string = setname
612                         else:
613                                 match_string = setname.split("/")[-1]
614                         
615                         if self.searchre.search(match_string):
616                                 self.matches["set"].append([setname, False])
617                         elif self.searchdesc:
618                                 if self.searchre.search(
619                                         self.sdict[setname].getMetadata("DESCRIPTION")):
620                                         self.matches["set"].append([setname, False])
621                         
622                 self.mlen=0
623                 for mtype in self.matches:
624                         self.matches[mtype].sort()
625                         self.mlen += len(self.matches[mtype])
626
627         def addCP(self, cp):
628                 if not self.portdb.xmatch("match-all", cp):
629                         return
630                 masked = 0
631                 if not self.portdb.xmatch("bestmatch-visible", cp):
632                         masked = 1
633                 self.matches["pkg"].append([cp, masked])
634                 self.mlen += 1
635
636         def output(self):
637                 """Outputs the results of the search."""
638                 print "\b\b  \n[ Results for search key : "+white(self.searchkey)+" ]"
639                 print "[ Applications found : "+white(str(self.mlen))+" ]"
640                 print " "
641                 vardb = self.vartree.dbapi
642                 for mtype in self.matches:
643                         for match,masked in self.matches[mtype]:
644                                 full_package = None
645                                 if mtype == "pkg":
646                                         catpack = match
647                                         full_package = self.portdb.xmatch(
648                                                 "bestmatch-visible", match)
649                                         if not full_package:
650                                                 #no match found; we don't want to query description
651                                                 masked=1
652                                                 full_package = portage.best(
653                                                         self.portdb.xmatch("match-all",match))
654                                 elif mtype == "desc":
655                                         full_package = match
656                                         match        = portage.cpv_getkey(match)
657                                 elif mtype == "set":
658                                         print green("*")+"  "+white(match)
659                                         print "     ", darkgreen("Description:")+"  ", self.sdict[match].getMetadata("DESCRIPTION")
660                                         print
661                                 if full_package:
662                                         try:
663                                                 desc, homepage, license = self.portdb.aux_get(
664                                                         full_package, ["DESCRIPTION","HOMEPAGE","LICENSE"])
665                                         except KeyError:
666                                                 print "emerge: search: aux_get() failed, skipping"
667                                                 continue
668                                         if masked:
669                                                 print green("*")+"  "+white(match)+" "+red("[ Masked ]")
670                                         else:
671                                                 print green("*")+"  "+white(match)
672                                         myversion = self.getVersion(full_package, search.VERSION_RELEASE)
673
674                                         mysum = [0,0]
675                                         file_size_str = None
676                                         mycat = match.split("/")[0]
677                                         mypkg = match.split("/")[1]
678                                         mycpv = match + "-" + myversion
679                                         myebuild = self.portdb.findname(mycpv)
680                                         if myebuild:
681                                                 pkgdir = os.path.dirname(myebuild)
682                                                 from portage import manifest
683                                                 mf = manifest.Manifest(
684                                                         pkgdir, self.settings["DISTDIR"])
685                                                 try:
686                                                         uri_map = self.portdb.getFetchMap(mycpv)
687                                                 except portage.exception.InvalidDependString, e:
688                                                         file_size_str = "Unknown (%s)" % (e,)
689                                                         del e
690                                                 else:
691                                                         try:
692                                                                 mysum[0] = mf.getDistfilesSize(uri_map)
693                                                         except KeyError, e:
694                                                                 file_size_str = "Unknown (missing " + \
695                                                                         "digest for %s)" % (e,)
696                                                                 del e
697
698                                         available = False
699                                         for db in self._dbs:
700                                                 if db is not vardb and \
701                                                         db.cpv_exists(mycpv):
702                                                         available = True
703                                                         if not myebuild and hasattr(db, "bintree"):
704                                                                 myebuild = db.bintree.getname(mycpv)
705                                                                 try:
706                                                                         mysum[0] = os.stat(myebuild).st_size
707                                                                 except OSError:
708                                                                         myebuild = None
709                                                         break
710
711                                         if myebuild and file_size_str is None:
712                                                 mystr = str(mysum[0] / 1024)
713                                                 mycount = len(mystr)
714                                                 while (mycount > 3):
715                                                         mycount -= 3
716                                                         mystr = mystr[:mycount] + "," + mystr[mycount:]
717                                                 file_size_str = mystr + " kB"
718
719                                         if self.verbose:
720                                                 if available:
721                                                         print "     ", darkgreen("Latest version available:"),myversion
722                                                 print "     ", self.getInstallationStatus(mycat+'/'+mypkg)
723                                                 if myebuild:
724                                                         print "      %s %s" % \
725                                                                 (darkgreen("Size of files:"), file_size_str)
726                                                 print "     ", darkgreen("Homepage:")+"     ",homepage
727                                                 print "     ", darkgreen("Description:")+"  ",desc
728                                                 print "     ", darkgreen("License:")+"      ",license
729                                                 print
730         #
731         # private interface
732         #
733         def getInstallationStatus(self,package):
734                 installed_package = self.vartree.dep_bestmatch(package)
735                 result = ""
736                 version = self.getVersion(installed_package,search.VERSION_RELEASE)
737                 if len(version) > 0:
738                         result = darkgreen("Latest version installed:")+" "+version
739                 else:
740                         result = darkgreen("Latest version installed:")+" [ Not Installed ]"
741                 return result
742
743         def getVersion(self,full_package,detail):
744                 if len(full_package) > 1:
745                         package_parts = portage.catpkgsplit(full_package)
746                         if detail == search.VERSION_RELEASE and package_parts[3] != 'r0':
747                                 result = package_parts[2]+ "-" + package_parts[3]
748                         else:
749                                 result = package_parts[2]
750                 else:
751                         result = ""
752                 return result
753
754 class RootConfig(object):
755         """This is used internally by depgraph to track information about a
756         particular $ROOT."""
757
758         pkg_tree_map = {
759                 "ebuild"    : "porttree",
760                 "binary"    : "bintree",
761                 "installed" : "vartree"
762         }
763
764         tree_pkg_map = {}
765         for k, v in pkg_tree_map.iteritems():
766                 tree_pkg_map[v] = k
767
768         def __init__(self, settings, trees, setconfig):
769                 self.trees = trees
770                 self.settings = settings
771                 self.iuse_implicit = tuple(sorted(settings._get_implicit_iuse()))
772                 self.root = self.settings["ROOT"]
773                 self.setconfig = setconfig
774                 if setconfig is None:
775                         self.sets = {}
776                 else:
777                         self.sets = self.setconfig.getSets()
778                 self.visible_pkgs = PackageVirtualDbapi(self.settings)
779
780 def create_world_atom(pkg, args_set, root_config):
781         """Create a new atom for the world file if one does not exist.  If the
782         argument atom is precise enough to identify a specific slot then a slot
783         atom will be returned. Atoms that are in the system set may also be stored
784         in world since system atoms can only match one slot while world atoms can
785         be greedy with respect to slots.  Unslotted system packages will not be
786         stored in world."""
787
788         arg_atom = args_set.findAtomForPackage(pkg)
789         if not arg_atom:
790                 return None
791         cp = portage.dep_getkey(arg_atom)
792         new_world_atom = cp
793         sets = root_config.sets
794         portdb = root_config.trees["porttree"].dbapi
795         vardb = root_config.trees["vartree"].dbapi
796         available_slots = set(portdb.aux_get(cpv, ["SLOT"])[0] \
797                 for cpv in portdb.match(cp))
798         slotted = len(available_slots) > 1 or \
799                 (len(available_slots) == 1 and "0" not in available_slots)
800         if not slotted:
801                 # check the vdb in case this is multislot
802                 available_slots = set(vardb.aux_get(cpv, ["SLOT"])[0] \
803                         for cpv in vardb.match(cp))
804                 slotted = len(available_slots) > 1 or \
805                         (len(available_slots) == 1 and "0" not in available_slots)
806         if slotted and arg_atom != cp:
807                 # If the user gave a specific atom, store it as a
808                 # slot atom in the world file.
809                 slot_atom = pkg.slot_atom
810
811                 # For USE=multislot, there are a couple of cases to
812                 # handle here:
813                 #
814                 # 1) SLOT="0", but the real SLOT spontaneously changed to some
815                 #    unknown value, so just record an unslotted atom.
816                 #
817                 # 2) SLOT comes from an installed package and there is no
818                 #    matching SLOT in the portage tree.
819                 #
820                 # Make sure that the slot atom is available in either the
821                 # portdb or the vardb, since otherwise the user certainly
822                 # doesn't want the SLOT atom recorded in the world file
823                 # (case 1 above).  If it's only available in the vardb,
824                 # the user may be trying to prevent a USE=multislot
825                 # package from being removed by --depclean (case 2 above).
826
827                 mydb = portdb
828                 if not portdb.match(slot_atom):
829                         # SLOT seems to come from an installed multislot package
830                         mydb = vardb
831                 # If there is no installed package matching the SLOT atom,
832                 # it probably changed SLOT spontaneously due to USE=multislot,
833                 # so just record an unslotted atom.
834                 if vardb.match(slot_atom):
835                         # Now verify that the argument is precise
836                         # enough to identify a specific slot.
837                         matches = mydb.match(arg_atom)
838                         matched_slots = set()
839                         for cpv in matches:
840                                 matched_slots.add(mydb.aux_get(cpv, ["SLOT"])[0])
841                         if len(matched_slots) == 1:
842                                 new_world_atom = slot_atom
843
844         if new_world_atom == sets["world"].findAtomForPackage(pkg):
845                 # Both atoms would be identical, so there's nothing to add.
846                 return None
847         if not slotted:
848                 # Unlike world atoms, system atoms are not greedy for slots, so they
849                 # can't be safely excluded from world if they are slotted.
850                 system_atom = sets["system"].findAtomForPackage(pkg)
851                 if system_atom:
852                         if not portage.dep_getkey(system_atom).startswith("virtual/"):
853                                 return None
854                         # System virtuals aren't safe to exclude from world since they can
855                         # match multiple old-style virtuals but only one of them will be
856                         # pulled in by update or depclean.
857                         providers = portdb.mysettings.getvirtuals().get(
858                                 portage.dep_getkey(system_atom))
859                         if providers and len(providers) == 1 and providers[0] == cp:
860                                 return None
861         return new_world_atom
862
863 def filter_iuse_defaults(iuse):
864         for flag in iuse:
865                 if flag.startswith("+") or flag.startswith("-"):
866                         yield flag[1:]
867                 else:
868                         yield flag
869
870 class SlotObject(object):
871         __slots__ = ("__weakref__",)
872
873         def __init__(self, **kwargs):
874                 classes = [self.__class__]
875                 while classes:
876                         c = classes.pop()
877                         if c is SlotObject:
878                                 continue
879                         classes.extend(c.__bases__)
880                         slots = getattr(c, "__slots__", None)
881                         if not slots:
882                                 continue
883                         for myattr in slots:
884                                 myvalue = kwargs.get(myattr, None)
885                                 setattr(self, myattr, myvalue)
886
887         def copy(self):
888                 """
889                 Create a new instance and copy all attributes
890                 defined from __slots__ (including those from
891                 inherited classes).
892                 """
893                 obj = self.__class__()
894
895                 classes = [self.__class__]
896                 while classes:
897                         c = classes.pop()
898                         if c is SlotObject:
899                                 continue
900                         classes.extend(c.__bases__)
901                         slots = getattr(c, "__slots__", None)
902                         if not slots:
903                                 continue
904                         for myattr in slots:
905                                 setattr(obj, myattr, getattr(self, myattr))
906
907                 return obj
908
909 class AbstractDepPriority(SlotObject):
910         __slots__ = ("buildtime", "runtime", "runtime_post")
911
912         def __lt__(self, other):
913                 return self.__int__() < other
914
915         def __le__(self, other):
916                 return self.__int__() <= other
917
918         def __eq__(self, other):
919                 return self.__int__() == other
920
921         def __ne__(self, other):
922                 return self.__int__() != other
923
924         def __gt__(self, other):
925                 return self.__int__() > other
926
927         def __ge__(self, other):
928                 return self.__int__() >= other
929
930         def copy(self):
931                 import copy
932                 return copy.copy(self)
933
934 class DepPriority(AbstractDepPriority):
935
936         __slots__ = ("satisfied", "optional", "rebuild")
937
938         def __int__(self):
939                 return 0
940
941         def __str__(self):
942                 if self.optional:
943                         return "optional"
944                 if self.buildtime:
945                         return "buildtime"
946                 if self.runtime:
947                         return "runtime"
948                 if self.runtime_post:
949                         return "runtime_post"
950                 return "soft"
951
952 class BlockerDepPriority(DepPriority):
953         __slots__ = ()
954         def __int__(self):
955                 return 0
956
957         def __str__(self):
958                 return 'blocker'
959
960 BlockerDepPriority.instance = BlockerDepPriority()
961
962 class UnmergeDepPriority(AbstractDepPriority):
963         __slots__ = ("optional", "satisfied",)
964         """
965         Combination of properties           Priority  Category
966
967         runtime                                0       HARD
968         runtime_post                          -1       HARD
969         buildtime                             -2       SOFT
970         (none of the above)                   -2       SOFT
971         """
972
973         MAX    =  0
974         SOFT   = -2
975         MIN    = -2
976
977         def __int__(self):
978                 if self.runtime:
979                         return 0
980                 if self.runtime_post:
981                         return -1
982                 if self.buildtime:
983                         return -2
984                 return -2
985
986         def __str__(self):
987                 myvalue = self.__int__()
988                 if myvalue > self.SOFT:
989                         return "hard"
990                 return "soft"
991
992 class DepPriorityNormalRange(object):
993         """
994         DepPriority properties              Index      Category
995
996         buildtime                                      HARD
997         runtime                                3       MEDIUM
998         runtime_post                           2       MEDIUM_SOFT
999         optional                               1       SOFT
1000         (none of the above)                    0       NONE
1001         """
1002         MEDIUM      = 3
1003         MEDIUM_SOFT = 2
1004         SOFT        = 1
1005         NONE        = 0
1006
1007         @classmethod
1008         def _ignore_optional(cls, priority):
1009                 if priority.__class__ is not DepPriority:
1010                         return False
1011                 return bool(priority.optional)
1012
1013         @classmethod
1014         def _ignore_runtime_post(cls, priority):
1015                 if priority.__class__ is not DepPriority:
1016                         return False
1017                 return bool(priority.optional or priority.runtime_post)
1018
1019         @classmethod
1020         def _ignore_runtime(cls, priority):
1021                 if priority.__class__ is not DepPriority:
1022                         return False
1023                 return not priority.buildtime
1024
1025         ignore_medium      = _ignore_runtime
1026         ignore_medium_soft = _ignore_runtime_post
1027         ignore_soft        = _ignore_optional
1028
1029 DepPriorityNormalRange.ignore_priority = (
1030         None,
1031         DepPriorityNormalRange._ignore_optional,
1032         DepPriorityNormalRange._ignore_runtime_post,
1033         DepPriorityNormalRange._ignore_runtime
1034 )
1035
1036 class DepPrioritySatisfiedRange(object):
1037         """
1038         DepPriority                         Index      Category
1039
1040         not satisfied and buildtime                    HARD
1041         not satisfied and runtime              7       MEDIUM
1042         not satisfied and runtime_post         6       MEDIUM_SOFT
1043         satisfied and buildtime and rebuild    5       SOFT
1044         satisfied and buildtime                4       SOFT
1045         satisfied and runtime                  3       SOFT
1046         satisfied and runtime_post             2       SOFT
1047         optional                               1       SOFT
1048         (none of the above)                    0       NONE
1049         """
1050         MEDIUM      = 7
1051         MEDIUM_SOFT = 6
1052         SOFT        = 5
1053         NONE        = 0
1054
1055         @classmethod
1056         def _ignore_optional(cls, priority):
1057                 if priority.__class__ is not DepPriority:
1058                         return False
1059                 return bool(priority.optional)
1060
1061         @classmethod
1062         def _ignore_satisfied_runtime_post(cls, priority):
1063                 if priority.__class__ is not DepPriority:
1064                         return False
1065                 if priority.optional:
1066                         return True
1067                 if not priority.satisfied:
1068                         return False
1069                 return bool(priority.runtime_post)
1070
1071         @classmethod
1072         def _ignore_satisfied_runtime(cls, priority):
1073                 if priority.__class__ is not DepPriority:
1074                         return False
1075                 if priority.optional:
1076                         return True
1077                 if not priority.satisfied:
1078                         return False
1079                 return not priority.buildtime
1080
1081         @classmethod
1082         def _ignore_satisfied_buildtime(cls, priority):
1083                 if priority.__class__ is not DepPriority:
1084                         return False
1085                 if priority.optional:
1086                         return True
1087                 if not priority.satisfied:
1088                         return False
1089                 if priority.buildtime:
1090                         return not priority.rebuild
1091                 return True
1092
1093         @classmethod
1094         def _ignore_satisfied_buildtime_rebuild(cls, priority):
1095                 if priority.__class__ is not DepPriority:
1096                         return False
1097                 if priority.optional:
1098                         return True
1099                 return bool(priority.satisfied)
1100
1101         @classmethod
1102         def _ignore_runtime_post(cls, priority):
1103                 if priority.__class__ is not DepPriority:
1104                         return False
1105                 return bool(priority.optional or \
1106                         priority.satisfied or \
1107                         priority.runtime_post)
1108
1109         @classmethod
1110         def _ignore_runtime(cls, priority):
1111                 if priority.__class__ is not DepPriority:
1112                         return False
1113                 return bool(priority.satisfied or \
1114                         not priority.buildtime)
1115
1116         ignore_medium      = _ignore_runtime
1117         ignore_medium_soft = _ignore_runtime_post
1118         ignore_soft        = _ignore_satisfied_buildtime_rebuild
1119
1120 DepPrioritySatisfiedRange.ignore_priority = (
1121         None,
1122         DepPrioritySatisfiedRange._ignore_optional,
1123         DepPrioritySatisfiedRange._ignore_satisfied_runtime_post,
1124         DepPrioritySatisfiedRange._ignore_satisfied_runtime,
1125         DepPrioritySatisfiedRange._ignore_satisfied_buildtime,
1126         DepPrioritySatisfiedRange._ignore_satisfied_buildtime_rebuild,
1127         DepPrioritySatisfiedRange._ignore_runtime_post,
1128         DepPrioritySatisfiedRange._ignore_runtime
1129 )
1130
1131 def _find_deep_system_runtime_deps(graph):
1132         deep_system_deps = set()
1133         node_stack = []
1134         for node in graph:
1135                 if not isinstance(node, Package) or \
1136                         node.operation == 'uninstall':
1137                         continue
1138                 if node.root_config.sets['system'].findAtomForPackage(node):
1139                         node_stack.append(node)
1140
1141         def ignore_priority(priority):
1142                 """
1143                 Ignore non-runtime priorities.
1144                 """
1145                 if isinstance(priority, DepPriority) and \
1146                         (priority.runtime or priority.runtime_post):
1147                         return False
1148                 return True
1149
1150         while node_stack:
1151                 node = node_stack.pop()
1152                 if node in deep_system_deps:
1153                         continue
1154                 deep_system_deps.add(node)
1155                 for child in graph.child_nodes(node, ignore_priority=ignore_priority):
1156                         if not isinstance(child, Package) or \
1157                                 child.operation == 'uninstall':
1158                                 continue
1159                         node_stack.append(child)
1160
1161         return deep_system_deps
1162
1163 class FakeVartree(portage.vartree):
1164         """This is implements an in-memory copy of a vartree instance that provides
1165         all the interfaces required for use by the depgraph.  The vardb is locked
1166         during the constructor call just long enough to read a copy of the
1167         installed package information.  This allows the depgraph to do it's
1168         dependency calculations without holding a lock on the vardb.  It also
1169         allows things like vardb global updates to be done in memory so that the
1170         user doesn't necessarily need write access to the vardb in cases where
1171         global updates are necessary (updates are performed when necessary if there
1172         is not a matching ebuild in the tree)."""
1173         def __init__(self, root_config, pkg_cache=None, acquire_lock=1):
1174                 self._root_config = root_config
1175                 if pkg_cache is None:
1176                         pkg_cache = {}
1177                 real_vartree = root_config.trees["vartree"]
1178                 portdb = root_config.trees["porttree"].dbapi
1179                 self.root = real_vartree.root
1180                 self.settings = real_vartree.settings
1181                 mykeys = list(real_vartree.dbapi._aux_cache_keys)
1182                 if "_mtime_" not in mykeys:
1183                         mykeys.append("_mtime_")
1184                 self._db_keys = mykeys
1185                 self._pkg_cache = pkg_cache
1186                 self.dbapi = PackageVirtualDbapi(real_vartree.settings)
1187                 vdb_path = os.path.join(self.root, portage.VDB_PATH)
1188                 try:
1189                         # At least the parent needs to exist for the lock file.
1190                         portage.util.ensure_dirs(vdb_path)
1191                 except portage.exception.PortageException:
1192                         pass
1193                 vdb_lock = None
1194                 try:
1195                         if acquire_lock and os.access(vdb_path, os.W_OK):
1196                                 vdb_lock = portage.locks.lockdir(vdb_path)
1197                         real_dbapi = real_vartree.dbapi
1198                         slot_counters = {}
1199                         for cpv in real_dbapi.cpv_all():
1200                                 cache_key = ("installed", self.root, cpv, "nomerge")
1201                                 pkg = self._pkg_cache.get(cache_key)
1202                                 if pkg is not None:
1203                                         metadata = pkg.metadata
1204                                 else:
1205                                         metadata = dict(izip(mykeys, real_dbapi.aux_get(cpv, mykeys)))
1206                                 myslot = metadata["SLOT"]
1207                                 mycp = portage.dep_getkey(cpv)
1208                                 myslot_atom = "%s:%s" % (mycp, myslot)
1209                                 try:
1210                                         mycounter = long(metadata["COUNTER"])
1211                                 except ValueError:
1212                                         mycounter = 0
1213                                         metadata["COUNTER"] = str(mycounter)
1214                                 other_counter = slot_counters.get(myslot_atom, None)
1215                                 if other_counter is not None:
1216                                         if other_counter > mycounter:
1217                                                 continue
1218                                 slot_counters[myslot_atom] = mycounter
1219                                 if pkg is None:
1220                                         pkg = Package(built=True, cpv=cpv,
1221                                                 installed=True, metadata=metadata,
1222                                                 root_config=root_config, type_name="installed")
1223                                 self._pkg_cache[pkg] = pkg
1224                                 self.dbapi.cpv_inject(pkg)
1225                         real_dbapi.flush_cache()
1226                 finally:
1227                         if vdb_lock:
1228                                 portage.locks.unlockdir(vdb_lock)
1229                 # Populate the old-style virtuals using the cached values.
1230                 if not self.settings.treeVirtuals:
1231                         self.settings.treeVirtuals = portage.util.map_dictlist_vals(
1232                                 portage.getCPFromCPV, self.get_all_provides())
1233
1234                 # Intialize variables needed for lazy cache pulls of the live ebuild
1235                 # metadata.  This ensures that the vardb lock is released ASAP, without
1236                 # being delayed in case cache generation is triggered.
1237                 self._aux_get = self.dbapi.aux_get
1238                 self.dbapi.aux_get = self._aux_get_wrapper
1239                 self._match = self.dbapi.match
1240                 self.dbapi.match = self._match_wrapper
1241                 self._aux_get_history = set()
1242                 self._portdb_keys = ["EAPI", "DEPEND", "RDEPEND", "PDEPEND"]
1243                 self._portdb = portdb
1244                 self._global_updates = None
1245
1246         def _match_wrapper(self, cpv, use_cache=1):
1247                 """
1248                 Make sure the metadata in Package instances gets updated for any
1249                 cpv that is returned from a match() call, since the metadata can
1250                 be accessed directly from the Package instance instead of via
1251                 aux_get().
1252                 """
1253                 matches = self._match(cpv, use_cache=use_cache)
1254                 for cpv in matches:
1255                         if cpv in self._aux_get_history:
1256                                 continue
1257                         self._aux_get_wrapper(cpv, [])
1258                 return matches
1259
1260         def _aux_get_wrapper(self, pkg, wants):
1261                 if pkg in self._aux_get_history:
1262                         return self._aux_get(pkg, wants)
1263                 self._aux_get_history.add(pkg)
1264                 try:
1265                         # Use the live ebuild metadata if possible.
1266                         live_metadata = dict(izip(self._portdb_keys,
1267                                 self._portdb.aux_get(pkg, self._portdb_keys)))
1268                         if not portage.eapi_is_supported(live_metadata["EAPI"]):
1269                                 raise KeyError(pkg)
1270                         self.dbapi.aux_update(pkg, live_metadata)
1271                 except (KeyError, portage.exception.PortageException):
1272                         if self._global_updates is None:
1273                                 self._global_updates = \
1274                                         grab_global_updates(self._portdb.porttree_root)
1275                         perform_global_updates(
1276                                 pkg, self.dbapi, self._global_updates)
1277                 return self._aux_get(pkg, wants)
1278
1279         def sync(self, acquire_lock=1):
1280                 """
1281                 Call this method to synchronize state with the real vardb
1282                 after one or more packages may have been installed or
1283                 uninstalled.
1284                 """
1285                 vdb_path = os.path.join(self.root, portage.VDB_PATH)
1286                 try:
1287                         # At least the parent needs to exist for the lock file.
1288                         portage.util.ensure_dirs(vdb_path)
1289                 except portage.exception.PortageException:
1290                         pass
1291                 vdb_lock = None
1292                 try:
1293                         if acquire_lock and os.access(vdb_path, os.W_OK):
1294                                 vdb_lock = portage.locks.lockdir(vdb_path)
1295                         self._sync()
1296                 finally:
1297                         if vdb_lock:
1298                                 portage.locks.unlockdir(vdb_lock)
1299
1300         def _sync(self):
1301
1302                 real_vardb = self._root_config.trees["vartree"].dbapi
1303                 current_cpv_set = frozenset(real_vardb.cpv_all())
1304                 pkg_vardb = self.dbapi
1305                 aux_get_history = self._aux_get_history
1306
1307                 # Remove any packages that have been uninstalled.
1308                 for pkg in list(pkg_vardb):
1309                         if pkg.cpv not in current_cpv_set:
1310                                 pkg_vardb.cpv_remove(pkg)
1311                                 aux_get_history.discard(pkg.cpv)
1312
1313                 # Validate counters and timestamps.
1314                 slot_counters = {}
1315                 root = self.root
1316                 validation_keys = ["COUNTER", "_mtime_"]
1317                 for cpv in current_cpv_set:
1318
1319                         pkg_hash_key = ("installed", root, cpv, "nomerge")
1320                         pkg = pkg_vardb.get(pkg_hash_key)
1321                         if pkg is not None:
1322                                 counter, mtime = real_vardb.aux_get(cpv, validation_keys)
1323                                 try:
1324                                         counter = long(counter)
1325                                 except ValueError:
1326                                         counter = 0
1327
1328                                 if counter != pkg.counter or \
1329                                         mtime != pkg.mtime:
1330                                         pkg_vardb.cpv_remove(pkg)
1331                                         aux_get_history.discard(pkg.cpv)
1332                                         pkg = None
1333
1334                         if pkg is None:
1335                                 pkg = self._pkg(cpv)
1336
1337                         other_counter = slot_counters.get(pkg.slot_atom)
1338                         if other_counter is not None:
1339                                 if other_counter > pkg.counter:
1340                                         continue
1341
1342                         slot_counters[pkg.slot_atom] = pkg.counter
1343                         pkg_vardb.cpv_inject(pkg)
1344
1345                 real_vardb.flush_cache()
1346
1347         def _pkg(self, cpv):
1348                 root_config = self._root_config
1349                 real_vardb = root_config.trees["vartree"].dbapi
1350                 pkg = Package(cpv=cpv, installed=True,
1351                         metadata=izip(self._db_keys,
1352                         real_vardb.aux_get(cpv, self._db_keys)),
1353                         root_config=root_config,
1354                         type_name="installed")
1355
1356                 try:
1357                         mycounter = long(pkg.metadata["COUNTER"])
1358                 except ValueError:
1359                         mycounter = 0
1360                         pkg.metadata["COUNTER"] = str(mycounter)
1361
1362                 return pkg
1363
1364 def grab_global_updates(portdir):
1365         from portage.update import grab_updates, parse_updates
1366         updpath = os.path.join(portdir, "profiles", "updates")
1367         try:
1368                 rawupdates = grab_updates(updpath)
1369         except portage.exception.DirectoryNotFound:
1370                 rawupdates = []
1371         upd_commands = []
1372         for mykey, mystat, mycontent in rawupdates:
1373                 commands, errors = parse_updates(mycontent)
1374                 upd_commands.extend(commands)
1375         return upd_commands
1376
1377 def perform_global_updates(mycpv, mydb, mycommands):
1378         from portage.update import update_dbentries
1379         aux_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
1380         aux_dict = dict(izip(aux_keys, mydb.aux_get(mycpv, aux_keys)))
1381         updates = update_dbentries(mycommands, aux_dict)
1382         if updates:
1383                 mydb.aux_update(mycpv, updates)
1384
1385 def visible(pkgsettings, pkg):
1386         """
1387         Check if a package is visible. This can raise an InvalidDependString
1388         exception if LICENSE is invalid.
1389         TODO: optionally generate a list of masking reasons
1390         @rtype: Boolean
1391         @returns: True if the package is visible, False otherwise.
1392         """
1393         if not pkg.metadata["SLOT"]:
1394                 return False
1395         if not pkg.installed:
1396                 if not pkgsettings._accept_chost(pkg.cpv, pkg.metadata):
1397                         return False
1398         eapi = pkg.metadata["EAPI"]
1399         if not portage.eapi_is_supported(eapi):
1400                 return False
1401         if not pkg.installed:
1402                 if portage._eapi_is_deprecated(eapi):
1403                         return False
1404                 if pkgsettings._getMissingKeywords(pkg.cpv, pkg.metadata):
1405                         return False
1406         if pkgsettings._getMaskAtom(pkg.cpv, pkg.metadata):
1407                 return False
1408         if pkgsettings._getProfileMaskAtom(pkg.cpv, pkg.metadata):
1409                 return False
1410         try:
1411                 if pkgsettings._getMissingLicenses(pkg.cpv, pkg.metadata):
1412                         return False
1413         except portage.exception.InvalidDependString:
1414                 return False
1415         return True
1416
1417 def get_masking_status(pkg, pkgsettings, root_config):
1418
1419         mreasons = portage.getmaskingstatus(
1420                 pkg, settings=pkgsettings,
1421                 portdb=root_config.trees["porttree"].dbapi)
1422
1423         if not pkg.installed:
1424                 if not pkgsettings._accept_chost(pkg.cpv, pkg.metadata):
1425                         mreasons.append("CHOST: %s" % \
1426                                 pkg.metadata["CHOST"])
1427
1428         if not pkg.metadata["SLOT"]:
1429                 mreasons.append("invalid: SLOT is undefined")
1430
1431         return mreasons
1432
1433 def get_mask_info(root_config, cpv, pkgsettings,
1434         db, pkg_type, built, installed, db_keys):
1435         eapi_masked = False
1436         try:
1437                 metadata = dict(izip(db_keys,
1438                         db.aux_get(cpv, db_keys)))
1439         except KeyError:
1440                 metadata = None
1441         if metadata and not built:
1442                 pkgsettings.setcpv(cpv, mydb=metadata)
1443                 metadata["USE"] = pkgsettings["PORTAGE_USE"]
1444                 metadata['CHOST'] = pkgsettings.get('CHOST', '')
1445         if metadata is None:
1446                 mreasons = ["corruption"]
1447         else:
1448                 eapi = metadata['EAPI']
1449                 if eapi[:1] == '-':
1450                         eapi = eapi[1:]
1451                 if not portage.eapi_is_supported(eapi):
1452                         mreasons = ['EAPI %s' % eapi]
1453                 else:
1454                         pkg = Package(type_name=pkg_type, root_config=root_config,
1455                                 cpv=cpv, built=built, installed=installed, metadata=metadata)
1456                         mreasons = get_masking_status(pkg, pkgsettings, root_config)
1457         return metadata, mreasons
1458
1459 def show_masked_packages(masked_packages):
1460         shown_licenses = set()
1461         shown_comments = set()
1462         # Maybe there is both an ebuild and a binary. Only
1463         # show one of them to avoid redundant appearance.
1464         shown_cpvs = set()
1465         have_eapi_mask = False
1466         for (root_config, pkgsettings, cpv,
1467                 metadata, mreasons) in masked_packages:
1468                 if cpv in shown_cpvs:
1469                         continue
1470                 shown_cpvs.add(cpv)
1471                 comment, filename = None, None
1472                 if "package.mask" in mreasons:
1473                         comment, filename = \
1474                                 portage.getmaskingreason(
1475                                 cpv, metadata=metadata,
1476                                 settings=pkgsettings,
1477                                 portdb=root_config.trees["porttree"].dbapi,
1478                                 return_location=True)
1479                 missing_licenses = []
1480                 if metadata:
1481                         if not portage.eapi_is_supported(metadata["EAPI"]):
1482                                 have_eapi_mask = True
1483                         try:
1484                                 missing_licenses = \
1485                                         pkgsettings._getMissingLicenses(
1486                                                 cpv, metadata)
1487                         except portage.exception.InvalidDependString:
1488                                 # This will have already been reported
1489                                 # above via mreasons.
1490                                 pass
1491
1492                 print "- "+cpv+" (masked by: "+", ".join(mreasons)+")"
1493                 if comment and comment not in shown_comments:
1494                         print filename+":"
1495                         print comment
1496                         shown_comments.add(comment)
1497                 portdb = root_config.trees["porttree"].dbapi
1498                 for l in missing_licenses:
1499                         l_path = portdb.findLicensePath(l)
1500                         if l in shown_licenses:
1501                                 continue
1502                         msg = ("A copy of the '%s' license" + \
1503                         " is located at '%s'.") % (l, l_path)
1504                         print msg
1505                         print
1506                         shown_licenses.add(l)
1507         return have_eapi_mask
1508
1509 class Task(SlotObject):
1510         __slots__ = ("_hash_key", "_hash_value")
1511
1512         def _get_hash_key(self):
1513                 hash_key = getattr(self, "_hash_key", None)
1514                 if hash_key is None:
1515                         raise NotImplementedError(self)
1516                 return hash_key
1517
1518         def __eq__(self, other):
1519                 return self._get_hash_key() == other
1520
1521         def __ne__(self, other):
1522                 return self._get_hash_key() != other
1523
1524         def __hash__(self):
1525                 hash_value = getattr(self, "_hash_value", None)
1526                 if hash_value is None:
1527                         self._hash_value = hash(self._get_hash_key())
1528                 return self._hash_value
1529
1530         def __len__(self):
1531                 return len(self._get_hash_key())
1532
1533         def __getitem__(self, key):
1534                 return self._get_hash_key()[key]
1535
1536         def __iter__(self):
1537                 return iter(self._get_hash_key())
1538
1539         def __contains__(self, key):
1540                 return key in self._get_hash_key()
1541
1542         def __str__(self):
1543                 return str(self._get_hash_key())
1544
1545 class Blocker(Task):
1546
1547         __hash__ = Task.__hash__
1548         __slots__ = ("root", "atom", "cp", "eapi", "satisfied")
1549
1550         def __init__(self, **kwargs):
1551                 Task.__init__(self, **kwargs)
1552                 self.cp = portage.dep_getkey(self.atom)
1553
1554         def _get_hash_key(self):
1555                 hash_key = getattr(self, "_hash_key", None)
1556                 if hash_key is None:
1557                         self._hash_key = \
1558                                 ("blocks", self.root, self.atom, self.eapi)
1559                 return self._hash_key
1560
1561 class Package(Task):
1562
1563         __hash__ = Task.__hash__
1564         __slots__ = ("built", "cpv", "depth",
1565                 "installed", "metadata", "onlydeps", "operation",
1566                 "root_config", "type_name",
1567                 "category", "counter", "cp", "cpv_split",
1568                 "inherited", "iuse", "mtime",
1569                 "pf", "pv_split", "root", "slot", "slot_atom", "use")
1570
1571         metadata_keys = [
1572                 "CHOST", "COUNTER", "DEPEND", "EAPI",
1573                 "INHERITED", "IUSE", "KEYWORDS",
1574                 "LICENSE", "PDEPEND", "PROVIDE", "RDEPEND",
1575                 "repository", "PROPERTIES", "RESTRICT", "SLOT", "USE", "_mtime_"]
1576
1577         def __init__(self, **kwargs):
1578                 Task.__init__(self, **kwargs)
1579                 self.root = self.root_config.root
1580                 self.metadata = _PackageMetadataWrapper(self, self.metadata)
1581                 self.cp = portage.cpv_getkey(self.cpv)
1582                 slot = self.slot
1583                 if not slot:
1584                         # Avoid an InvalidAtom exception when creating slot_atom.
1585                         # This package instance will be masked due to empty SLOT.
1586                         slot = '0'
1587                 self.slot_atom = portage.dep.Atom("%s:%s" % (self.cp, slot))
1588                 self.category, self.pf = portage.catsplit(self.cpv)
1589                 self.cpv_split = portage.catpkgsplit(self.cpv)
1590                 self.pv_split = self.cpv_split[1:]
1591
1592         class _use(object):
1593
1594                 __slots__ = ("__weakref__", "enabled")
1595
1596                 def __init__(self, use):
1597                         self.enabled = frozenset(use)
1598
1599         class _iuse(object):
1600
1601                 __slots__ = ("__weakref__", "all", "enabled", "disabled", "iuse_implicit", "regex", "tokens")
1602
1603                 def __init__(self, tokens, iuse_implicit):
1604                         self.tokens = tuple(tokens)
1605                         self.iuse_implicit = iuse_implicit
1606                         enabled = []
1607                         disabled = []
1608                         other = []
1609                         for x in tokens:
1610                                 prefix = x[:1]
1611                                 if prefix == "+":
1612                                         enabled.append(x[1:])
1613                                 elif prefix == "-":
1614                                         disabled.append(x[1:])
1615                                 else:
1616                                         other.append(x)
1617                         self.enabled = frozenset(enabled)
1618                         self.disabled = frozenset(disabled)
1619                         self.all = frozenset(chain(enabled, disabled, other))
1620
1621                 def __getattribute__(self, name):
1622                         if name == "regex":
1623                                 try:
1624                                         return object.__getattribute__(self, "regex")
1625                                 except AttributeError:
1626                                         all = object.__getattribute__(self, "all")
1627                                         iuse_implicit = object.__getattribute__(self, "iuse_implicit")
1628                                         # Escape anything except ".*" which is supposed
1629                                         # to pass through from _get_implicit_iuse()
1630                                         regex = (re.escape(x) for x in chain(all, iuse_implicit))
1631                                         regex = "^(%s)$" % "|".join(regex)
1632                                         regex = regex.replace("\\.\\*", ".*")
1633                                         self.regex = re.compile(regex)
1634                         return object.__getattribute__(self, name)
1635
1636         def _get_hash_key(self):
1637                 hash_key = getattr(self, "_hash_key", None)
1638                 if hash_key is None:
1639                         if self.operation is None:
1640                                 self.operation = "merge"
1641                                 if self.onlydeps or self.installed:
1642                                         self.operation = "nomerge"
1643                         self._hash_key = \
1644                                 (self.type_name, self.root, self.cpv, self.operation)
1645                 return self._hash_key
1646
1647         def __lt__(self, other):
1648                 if other.cp != self.cp:
1649                         return False
1650                 if portage.pkgcmp(self.pv_split, other.pv_split) < 0:
1651                         return True
1652                 return False
1653
1654         def __le__(self, other):
1655                 if other.cp != self.cp:
1656                         return False
1657                 if portage.pkgcmp(self.pv_split, other.pv_split) <= 0:
1658                         return True
1659                 return False
1660
1661         def __gt__(self, other):
1662                 if other.cp != self.cp:
1663                         return False
1664                 if portage.pkgcmp(self.pv_split, other.pv_split) > 0:
1665                         return True
1666                 return False
1667
1668         def __ge__(self, other):
1669                 if other.cp != self.cp:
1670                         return False
1671                 if portage.pkgcmp(self.pv_split, other.pv_split) >= 0:
1672                         return True
1673                 return False
1674
1675 _all_metadata_keys = set(x for x in portage.auxdbkeys \
1676         if not x.startswith("UNUSED_"))
1677 _all_metadata_keys.discard("CDEPEND")
1678 _all_metadata_keys.update(Package.metadata_keys)
1679
1680 from portage.cache.mappings import slot_dict_class
1681 _PackageMetadataWrapperBase = slot_dict_class(_all_metadata_keys)
1682
1683 class _PackageMetadataWrapper(_PackageMetadataWrapperBase):
1684         """
1685         Detect metadata updates and synchronize Package attributes.
1686         """
1687
1688         __slots__ = ("_pkg",)
1689         _wrapped_keys = frozenset(
1690                 ["COUNTER", "INHERITED", "IUSE", "SLOT", "USE", "_mtime_"])
1691
1692         def __init__(self, pkg, metadata):
1693                 _PackageMetadataWrapperBase.__init__(self)
1694                 self._pkg = pkg
1695                 self.update(metadata)
1696
1697         def __setitem__(self, k, v):
1698                 _PackageMetadataWrapperBase.__setitem__(self, k, v)
1699                 if k in self._wrapped_keys:
1700                         getattr(self, "_set_" + k.lower())(k, v)
1701
1702         def _set_inherited(self, k, v):
1703                 if isinstance(v, basestring):
1704                         v = frozenset(v.split())
1705                 self._pkg.inherited = v
1706
1707         def _set_iuse(self, k, v):
1708                 self._pkg.iuse = self._pkg._iuse(
1709                         v.split(), self._pkg.root_config.iuse_implicit)
1710
1711         def _set_slot(self, k, v):
1712                 self._pkg.slot = v
1713
1714         def _set_use(self, k, v):
1715                 self._pkg.use = self._pkg._use(v.split())
1716
1717         def _set_counter(self, k, v):
1718                 if isinstance(v, basestring):
1719                         try:
1720                                 v = long(v.strip())
1721                         except ValueError:
1722                                 v = 0
1723                 self._pkg.counter = v
1724
1725         def _set__mtime_(self, k, v):
1726                 if isinstance(v, basestring):
1727                         try:
1728                                 v = long(v.strip())
1729                         except ValueError:
1730                                 v = 0
1731                 self._pkg.mtime = v
1732
1733 class EbuildFetchonly(SlotObject):
1734
1735         __slots__ = ("fetch_all", "pkg", "pretend", "settings")
1736
1737         def execute(self):
1738                 settings = self.settings
1739                 pkg = self.pkg
1740                 portdb = pkg.root_config.trees["porttree"].dbapi
1741                 ebuild_path = portdb.findname(pkg.cpv)
1742                 settings.setcpv(pkg)
1743                 debug = settings.get("PORTAGE_DEBUG") == "1"
1744                 use_cache = 1 # always true
1745                 portage.doebuild_environment(ebuild_path, "fetch",
1746                         settings["ROOT"], settings, debug, use_cache, portdb)
1747                 restrict_fetch = 'fetch' in settings['PORTAGE_RESTRICT'].split()
1748
1749                 if restrict_fetch:
1750                         rval = self._execute_with_builddir()
1751                 else:
1752                         rval = portage.doebuild(ebuild_path, "fetch",
1753                                 settings["ROOT"], settings, debug=debug,
1754                                 listonly=self.pretend, fetchonly=1, fetchall=self.fetch_all,
1755                                 mydbapi=portdb, tree="porttree")
1756
1757                         if rval != os.EX_OK:
1758                                 msg = "Fetch failed for '%s'" % (pkg.cpv,)
1759                                 eerror(msg, phase="unpack", key=pkg.cpv)
1760
1761                 return rval
1762
1763         def _execute_with_builddir(self):
1764                 # To spawn pkg_nofetch requires PORTAGE_BUILDDIR for
1765                 # ensuring sane $PWD (bug #239560) and storing elog
1766                 # messages. Use a private temp directory, in order
1767                 # to avoid locking the main one.
1768                 settings = self.settings
1769                 global_tmpdir = settings["PORTAGE_TMPDIR"]
1770                 from tempfile import mkdtemp
1771                 try:
1772                         private_tmpdir = mkdtemp("", "._portage_fetch_.", global_tmpdir)
1773                 except OSError, e:
1774                         if e.errno != portage.exception.PermissionDenied.errno:
1775                                 raise
1776                         raise portage.exception.PermissionDenied(global_tmpdir)
1777                 settings["PORTAGE_TMPDIR"] = private_tmpdir
1778                 settings.backup_changes("PORTAGE_TMPDIR")
1779                 try:
1780                         retval = self._execute()
1781                 finally:
1782                         settings["PORTAGE_TMPDIR"] = global_tmpdir
1783                         settings.backup_changes("PORTAGE_TMPDIR")
1784                         shutil.rmtree(private_tmpdir)
1785                 return retval
1786
1787         def _execute(self):
1788                 settings = self.settings
1789                 pkg = self.pkg
1790                 root_config = pkg.root_config
1791                 portdb = root_config.trees["porttree"].dbapi
1792                 ebuild_path = portdb.findname(pkg.cpv)
1793                 debug = settings.get("PORTAGE_DEBUG") == "1"
1794                 retval = portage.doebuild(ebuild_path, "fetch",
1795                         self.settings["ROOT"], self.settings, debug=debug,
1796                         listonly=self.pretend, fetchonly=1, fetchall=self.fetch_all,
1797                         mydbapi=portdb, tree="porttree")
1798
1799                 if retval != os.EX_OK:
1800                         msg = "Fetch failed for '%s'" % (pkg.cpv,)
1801                         eerror(msg, phase="unpack", key=pkg.cpv)
1802
1803                 portage.elog.elog_process(self.pkg.cpv, self.settings)
1804                 return retval
1805
1806 class PollConstants(object):
1807
1808         """
1809         Provides POLL* constants that are equivalent to those from the
1810         select module, for use by PollSelectAdapter.
1811         """
1812
1813         names = ("POLLIN", "POLLPRI", "POLLOUT", "POLLERR", "POLLHUP", "POLLNVAL")
1814         v = 1
1815         for k in names:
1816                 locals()[k] = getattr(select, k, v)
1817                 v *= 2
1818         del k, v
1819
1820 class AsynchronousTask(SlotObject):
1821         """
1822         Subclasses override _wait() and _poll() so that calls
1823         to public methods can be wrapped for implementing
1824         hooks such as exit listener notification.
1825
1826         Sublasses should call self.wait() to notify exit listeners after
1827         the task is complete and self.returncode has been set.
1828         """
1829
1830         __slots__ = ("background", "cancelled", "returncode") + \
1831                 ("_exit_listeners", "_exit_listener_stack", "_start_listeners")
1832
1833         def start(self):
1834                 """
1835                 Start an asynchronous task and then return as soon as possible.
1836                 """
1837                 self._start_hook()
1838                 self._start()
1839
1840         def _start(self):
1841                 raise NotImplementedError(self)
1842
1843         def isAlive(self):
1844                 return self.returncode is None
1845
1846         def poll(self):
1847                 self._wait_hook()
1848                 return self._poll()
1849
1850         def _poll(self):
1851                 return self.returncode
1852
1853         def wait(self):
1854                 if self.returncode is None:
1855                         self._wait()
1856                 self._wait_hook()
1857                 return self.returncode
1858
1859         def _wait(self):
1860                 return self.returncode
1861
1862         def cancel(self):
1863                 self.cancelled = True
1864                 self.wait()
1865
1866         def addStartListener(self, f):
1867                 """
1868                 The function will be called with one argument, a reference to self.
1869                 """
1870                 if self._start_listeners is None:
1871                         self._start_listeners = []
1872                 self._start_listeners.append(f)
1873
1874         def removeStartListener(self, f):
1875                 if self._start_listeners is None:
1876                         return
1877                 self._start_listeners.remove(f)
1878
1879         def _start_hook(self):
1880                 if self._start_listeners is not None:
1881                         start_listeners = self._start_listeners
1882                         self._start_listeners = None
1883
1884                         for f in start_listeners:
1885                                 f(self)
1886
1887         def addExitListener(self, f):
1888                 """
1889                 The function will be called with one argument, a reference to self.
1890                 """
1891                 if self._exit_listeners is None:
1892                         self._exit_listeners = []
1893                 self._exit_listeners.append(f)
1894
1895         def removeExitListener(self, f):
1896                 if self._exit_listeners is None:
1897                         if self._exit_listener_stack is not None:
1898                                 self._exit_listener_stack.remove(f)
1899                         return
1900                 self._exit_listeners.remove(f)
1901
1902         def _wait_hook(self):
1903                 """
1904                 Call this method after the task completes, just before returning
1905                 the returncode from wait() or poll(). This hook is
1906                 used to trigger exit listeners when the returncode first
1907                 becomes available.
1908                 """
1909                 if self.returncode is not None and \
1910                         self._exit_listeners is not None:
1911
1912                         # This prevents recursion, in case one of the
1913                         # exit handlers triggers this method again by
1914                         # calling wait(). Use a stack that gives
1915                         # removeExitListener() an opportunity to consume
1916                         # listeners from the stack, before they can get
1917                         # called below. This is necessary because a call
1918                         # to one exit listener may result in a call to
1919                         # removeExitListener() for another listener on
1920                         # the stack. That listener needs to be removed
1921                         # from the stack since it would be inconsistent
1922                         # to call it after it has been been passed into
1923                         # removeExitListener().
1924                         self._exit_listener_stack = self._exit_listeners
1925                         self._exit_listeners = None
1926
1927                         self._exit_listener_stack.reverse()
1928                         while self._exit_listener_stack:
1929                                 self._exit_listener_stack.pop()(self)
1930
1931 class AbstractPollTask(AsynchronousTask):
1932
1933         __slots__ = ("scheduler",) + \
1934                 ("_registered",)
1935
1936         _bufsize = 4096
1937         _exceptional_events = PollConstants.POLLERR | PollConstants.POLLNVAL
1938         _registered_events = PollConstants.POLLIN | PollConstants.POLLHUP | \
1939                 _exceptional_events
1940
1941         def _unregister(self):
1942                 raise NotImplementedError(self)
1943
1944         def _unregister_if_appropriate(self, event):
1945                 if self._registered:
1946                         if event & self._exceptional_events:
1947                                 self._unregister()
1948                                 self.cancel()
1949                         elif event & PollConstants.POLLHUP:
1950                                 self._unregister()
1951                                 self.wait()
1952
1953 class PipeReader(AbstractPollTask):
1954
1955         """
1956         Reads output from one or more files and saves it in memory,
1957         for retrieval via the getvalue() method. This is driven by
1958         the scheduler's poll() loop, so it runs entirely within the
1959         current process.
1960         """
1961
1962         __slots__ = ("input_files",) + \
1963                 ("_read_data", "_reg_ids")
1964
1965         def _start(self):
1966                 self._reg_ids = set()
1967                 self._read_data = []
1968                 for k, f in self.input_files.iteritems():
1969                         fcntl.fcntl(f.fileno(), fcntl.F_SETFL,
1970                                 fcntl.fcntl(f.fileno(), fcntl.F_GETFL) | os.O_NONBLOCK)
1971                         self._reg_ids.add(self.scheduler.register(f.fileno(),
1972                                 self._registered_events, self._output_handler))
1973                 self._registered = True
1974
1975         def isAlive(self):
1976                 return self._registered
1977
1978         def cancel(self):
1979                 if self.returncode is None:
1980                         self.returncode = 1
1981                         self.cancelled = True
1982                 self.wait()
1983
1984         def _wait(self):
1985                 if self.returncode is not None:
1986                         return self.returncode
1987
1988                 if self._registered:
1989                         self.scheduler.schedule(self._reg_ids)
1990                         self._unregister()
1991
1992                 self.returncode = os.EX_OK
1993                 return self.returncode
1994
1995         def getvalue(self):
1996                 """Retrieve the entire contents"""
1997                 if sys.hexversion >= 0x3000000:
1998                         return bytes().join(self._read_data)
1999                 return "".join(self._read_data)
2000
2001         def close(self):
2002                 """Free the memory buffer."""
2003                 self._read_data = None
2004
2005         def _output_handler(self, fd, event):
2006
2007                 if event & PollConstants.POLLIN:
2008
2009                         for f in self.input_files.itervalues():
2010                                 if fd == f.fileno():
2011                                         break
2012
2013                         buf = array.array('B')
2014                         try:
2015                                 buf.fromfile(f, self._bufsize)
2016                         except EOFError:
2017                                 pass
2018
2019                         if buf:
2020                                 self._read_data.append(buf.tostring())
2021                         else:
2022                                 self._unregister()
2023                                 self.wait()
2024
2025                 self._unregister_if_appropriate(event)
2026                 return self._registered
2027
2028         def _unregister(self):
2029                 """
2030                 Unregister from the scheduler and close open files.
2031                 """
2032
2033                 self._registered = False
2034
2035                 if self._reg_ids is not None:
2036                         for reg_id in self._reg_ids:
2037                                 self.scheduler.unregister(reg_id)
2038                         self._reg_ids = None
2039
2040                 if self.input_files is not None:
2041                         for f in self.input_files.itervalues():
2042                                 f.close()
2043                         self.input_files = None
2044
2045 class CompositeTask(AsynchronousTask):
2046
2047         __slots__ = ("scheduler",) + ("_current_task",)
2048
2049         def isAlive(self):
2050                 return self._current_task is not None
2051
2052         def cancel(self):
2053                 self.cancelled = True
2054                 if self._current_task is not None:
2055                         self._current_task.cancel()
2056
2057         def _poll(self):
2058                 """
2059                 This does a loop calling self._current_task.poll()
2060                 repeatedly as long as the value of self._current_task
2061                 keeps changing. It calls poll() a maximum of one time
2062                 for a given self._current_task instance. This is useful
2063                 since calling poll() on a task can trigger advance to
2064                 the next task could eventually lead to the returncode
2065                 being set in cases when polling only a single task would
2066                 not have the same effect.
2067                 """
2068
2069                 prev = None
2070                 while True:
2071                         task = self._current_task
2072                         if task is None or task is prev:
2073                                 # don't poll the same task more than once
2074                                 break
2075                         task.poll()
2076                         prev = task
2077
2078                 return self.returncode
2079
2080         def _wait(self):
2081
2082                 prev = None
2083                 while True:
2084                         task = self._current_task
2085                         if task is None:
2086                                 # don't wait for the same task more than once
2087                                 break
2088                         if task is prev:
2089                                 # Before the task.wait() method returned, an exit
2090                                 # listener should have set self._current_task to either
2091                                 # a different task or None. Something is wrong.
2092                                 raise AssertionError("self._current_task has not " + \
2093                                         "changed since calling wait", self, task)
2094                         task.wait()
2095                         prev = task
2096
2097                 return self.returncode
2098
2099         def _assert_current(self, task):
2100                 """
2101                 Raises an AssertionError if the given task is not the
2102                 same one as self._current_task. This can be useful
2103                 for detecting bugs.
2104                 """
2105                 if task is not self._current_task:
2106                         raise AssertionError("Unrecognized task: %s" % (task,))
2107
2108         def _default_exit(self, task):
2109                 """
2110                 Calls _assert_current() on the given task and then sets the
2111                 composite returncode attribute if task.returncode != os.EX_OK.
2112                 If the task failed then self._current_task will be set to None.
2113                 Subclasses can use this as a generic task exit callback.
2114
2115                 @rtype: int
2116                 @returns: The task.returncode attribute.
2117                 """
2118                 self._assert_current(task)
2119                 if task.returncode != os.EX_OK:
2120                         self.returncode = task.returncode
2121                         self._current_task = None
2122                 return task.returncode
2123
2124         def _final_exit(self, task):
2125                 """
2126                 Assumes that task is the final task of this composite task.
2127                 Calls _default_exit() and sets self.returncode to the task's
2128                 returncode and sets self._current_task to None.
2129                 """
2130                 self._default_exit(task)
2131                 self._current_task = None
2132                 self.returncode = task.returncode
2133                 return self.returncode
2134
2135         def _default_final_exit(self, task):
2136                 """
2137                 This calls _final_exit() and then wait().
2138
2139                 Subclasses can use this as a generic final task exit callback.
2140
2141                 """
2142                 self._final_exit(task)
2143                 return self.wait()
2144
2145         def _start_task(self, task, exit_handler):
2146                 """
2147                 Register exit handler for the given task, set it
2148                 as self._current_task, and call task.start().
2149
2150                 Subclasses can use this as a generic way to start
2151                 a task.
2152
2153                 """
2154                 task.addExitListener(exit_handler)
2155                 self._current_task = task
2156                 task.start()
2157
2158 class TaskSequence(CompositeTask):
2159         """
2160         A collection of tasks that executes sequentially. Each task
2161         must have a addExitListener() method that can be used as
2162         a means to trigger movement from one task to the next.
2163         """
2164
2165         __slots__ = ("_task_queue",)
2166
2167         def __init__(self, **kwargs):
2168                 AsynchronousTask.__init__(self, **kwargs)
2169                 self._task_queue = deque()
2170
2171         def add(self, task):
2172                 self._task_queue.append(task)
2173
2174         def _start(self):
2175                 self._start_next_task()
2176
2177         def cancel(self):
2178                 self._task_queue.clear()
2179                 CompositeTask.cancel(self)
2180
2181         def _start_next_task(self):
2182                 self._start_task(self._task_queue.popleft(),
2183                         self._task_exit_handler)
2184
2185         def _task_exit_handler(self, task):
2186                 if self._default_exit(task) != os.EX_OK:
2187                         self.wait()
2188                 elif self._task_queue:
2189                         self._start_next_task()
2190                 else:
2191                         self._final_exit(task)
2192                         self.wait()
2193
2194 class SubProcess(AbstractPollTask):
2195
2196         __slots__ = ("pid",) + \
2197                 ("_files", "_reg_id")
2198
2199         # A file descriptor is required for the scheduler to monitor changes from
2200         # inside a poll() loop. When logging is not enabled, create a pipe just to
2201         # serve this purpose alone.
2202         _dummy_pipe_fd = 9
2203
2204         def _poll(self):
2205                 if self.returncode is not None:
2206                         return self.returncode
2207                 if self.pid is None:
2208                         return self.returncode
2209                 if self._registered:
2210                         return self.returncode
2211
2212                 try:
2213                         retval = os.waitpid(self.pid, os.WNOHANG)
2214                 except OSError, e:
2215                         if e.errno != errno.ECHILD:
2216                                 raise
2217                         del e
2218                         retval = (self.pid, 1)
2219
2220                 if retval == (0, 0):
2221                         return None
2222                 self._set_returncode(retval)
2223                 return self.returncode
2224
2225         def cancel(self):
2226                 if self.isAlive():
2227                         try:
2228                                 os.kill(self.pid, signal.SIGTERM)
2229                         except OSError, e:
2230                                 if e.errno != errno.ESRCH:
2231                                         raise
2232                                 del e
2233
2234                 self.cancelled = True
2235                 if self.pid is not None:
2236                         self.wait()
2237                 return self.returncode
2238
2239         def isAlive(self):
2240                 return self.pid is not None and \
2241                         self.returncode is None
2242
2243         def _wait(self):
2244
2245                 if self.returncode is not None:
2246                         return self.returncode
2247
2248                 if self._registered:
2249                         self.scheduler.schedule(self._reg_id)
2250                         self._unregister()
2251                         if self.returncode is not None:
2252                                 return self.returncode
2253
2254                 try:
2255                         wait_retval = os.waitpid(self.pid, 0)
2256                 except OSError, e:
2257                         if e.errno != errno.ECHILD:
2258                                 raise
2259                         del e
2260                         self._set_returncode((self.pid, 1))
2261                 else:
2262                         self._set_returncode(wait_retval)
2263
2264                 return self.returncode
2265
2266         def _unregister(self):
2267                 """
2268                 Unregister from the scheduler and close open files.
2269                 """
2270
2271                 self._registered = False
2272
2273                 if self._reg_id is not None:
2274                         self.scheduler.unregister(self._reg_id)
2275                         self._reg_id = None
2276
2277                 if self._files is not None:
2278                         for f in self._files.itervalues():
2279                                 f.close()
2280                         self._files = None
2281
2282         def _set_returncode(self, wait_retval):
2283
2284                 retval = wait_retval[1]
2285
2286                 if retval != os.EX_OK:
2287                         if retval & 0xff:
2288                                 retval = (retval & 0xff) << 8
2289                         else:
2290                                 retval = retval >> 8
2291
2292                 self.returncode = retval
2293
2294 class SpawnProcess(SubProcess):
2295
2296         """
2297         Constructor keyword args are passed into portage.process.spawn().
2298         The required "args" keyword argument will be passed as the first
2299         spawn() argument.
2300         """
2301
2302         _spawn_kwarg_names = ("env", "opt_name", "fd_pipes",
2303                 "uid", "gid", "groups", "umask", "logfile",
2304                 "path_lookup", "pre_exec")
2305
2306         __slots__ = ("args",) + \
2307                 _spawn_kwarg_names
2308
2309         _file_names = ("log", "process", "stdout")
2310         _files_dict = slot_dict_class(_file_names, prefix="")
2311
2312         def _start(self):
2313
2314                 if self.cancelled:
2315                         return
2316
2317                 if self.fd_pipes is None:
2318                         self.fd_pipes = {}
2319                 fd_pipes = self.fd_pipes
2320                 fd_pipes.setdefault(0, sys.stdin.fileno())
2321                 fd_pipes.setdefault(1, sys.stdout.fileno())
2322                 fd_pipes.setdefault(2, sys.stderr.fileno())
2323
2324                 # flush any pending output
2325                 for fd in fd_pipes.itervalues():
2326                         if fd == sys.stdout.fileno():
2327                                 sys.stdout.flush()
2328                         if fd == sys.stderr.fileno():
2329                                 sys.stderr.flush()
2330
2331                 logfile = self.logfile
2332                 self._files = self._files_dict()
2333                 files = self._files
2334
2335                 master_fd, slave_fd = self._pipe(fd_pipes)
2336                 fcntl.fcntl(master_fd, fcntl.F_SETFL,
2337                         fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
2338
2339                 null_input = None
2340                 fd_pipes_orig = fd_pipes.copy()
2341                 if self.background:
2342                         # TODO: Use job control functions like tcsetpgrp() to control
2343                         # access to stdin. Until then, use /dev/null so that any
2344                         # attempts to read from stdin will immediately return EOF
2345                         # instead of blocking indefinitely.
2346                         null_input = open('/dev/null', 'rb')
2347                         fd_pipes[0] = null_input.fileno()
2348                 else:
2349                         fd_pipes[0] = fd_pipes_orig[0]
2350
2351                 files.process = os.fdopen(master_fd, 'rb')
2352                 if logfile is not None:
2353
2354                         fd_pipes[1] = slave_fd
2355                         fd_pipes[2] = slave_fd
2356
2357                         files.log = open(logfile, mode='ab')
2358                         portage.util.apply_secpass_permissions(logfile,
2359                                 uid=portage.portage_uid, gid=portage.portage_gid,
2360                                 mode=0660)
2361
2362                         if not self.background:
2363                                 files.stdout = os.fdopen(os.dup(fd_pipes_orig[1]), 'wb')
2364
2365                         output_handler = self._output_handler
2366
2367                 else:
2368
2369                         # Create a dummy pipe so the scheduler can monitor
2370                         # the process from inside a poll() loop.
2371                         fd_pipes[self._dummy_pipe_fd] = slave_fd
2372                         if self.background:
2373                                 fd_pipes[1] = slave_fd
2374                                 fd_pipes[2] = slave_fd
2375                         output_handler = self._dummy_handler
2376
2377                 kwargs = {}
2378                 for k in self._spawn_kwarg_names:
2379                         v = getattr(self, k)
2380                         if v is not None:
2381                                 kwargs[k] = v
2382
2383                 kwargs["fd_pipes"] = fd_pipes
2384                 kwargs["returnpid"] = True
2385                 kwargs.pop("logfile", None)
2386
2387                 self._reg_id = self.scheduler.register(files.process.fileno(),
2388                         self._registered_events, output_handler)
2389                 self._registered = True
2390
2391                 retval = self._spawn(self.args, **kwargs)
2392
2393                 os.close(slave_fd)
2394                 if null_input is not None:
2395                         null_input.close()
2396
2397                 if isinstance(retval, int):
2398                         # spawn failed
2399                         self._unregister()
2400                         self.returncode = retval
2401                         self.wait()
2402                         return
2403
2404                 self.pid = retval[0]
2405                 portage.process.spawned_pids.remove(self.pid)
2406
2407         def _pipe(self, fd_pipes):
2408                 """
2409                 @type fd_pipes: dict
2410                 @param fd_pipes: pipes from which to copy terminal size if desired.
2411                 """
2412                 return os.pipe()
2413
2414         def _spawn(self, args, **kwargs):
2415                 return portage.process.spawn(args, **kwargs)
2416
2417         def _output_handler(self, fd, event):
2418
2419                 if event & PollConstants.POLLIN:
2420
2421                         files = self._files
2422                         buf = array.array('B')
2423                         try:
2424                                 buf.fromfile(files.process, self._bufsize)
2425                         except EOFError:
2426                                 pass
2427
2428                         if buf:
2429                                 if not self.background:
2430                                         write_successful = False
2431                                         failures = 0
2432                                         while True:
2433                                                 try:
2434                                                         if not write_successful:
2435                                                                 buf.tofile(files.stdout)
2436                                                                 write_successful = True
2437                                                         files.stdout.flush()
2438                                                         break
2439                                                 except IOError, e:
2440                                                         if e.errno != errno.EAGAIN:
2441                                                                 raise
2442                                                         del e
2443                                                         failures += 1
2444                                                         if failures > 50:
2445                                                                 # Avoid a potentially infinite loop. In
2446                                                                 # most cases, the failure count is zero
2447                                                                 # and it's unlikely to exceed 1.
2448                                                                 raise
2449
2450                                                         # This means that a subprocess has put an inherited
2451                                                         # stdio file descriptor (typically stdin) into
2452                                                         # O_NONBLOCK mode. This is not acceptable (see bug
2453                                                         # #264435), so revert it. We need to use a loop
2454                                                         # here since there's a race condition due to
2455                                                         # parallel processes being able to change the
2456                                                         # flags on the inherited file descriptor.
2457                                                         # TODO: When possible, avoid having child processes
2458                                                         # inherit stdio file descriptors from portage
2459                                                         # (maybe it can't be avoided with
2460                                                         # PROPERTIES=interactive).
2461                                                         fcntl.fcntl(files.stdout.fileno(), fcntl.F_SETFL,
2462                                                                 fcntl.fcntl(files.stdout.fileno(),
2463                                                                 fcntl.F_GETFL) ^ os.O_NONBLOCK)
2464
2465                                 buf.tofile(files.log)
2466                                 files.log.flush()
2467                         else:
2468                                 self._unregister()
2469                                 self.wait()
2470
2471                 self._unregister_if_appropriate(event)
2472                 return self._registered
2473
2474         def _dummy_handler(self, fd, event):
2475                 """
2476                 This method is mainly interested in detecting EOF, since
2477                 the only purpose of the pipe is to allow the scheduler to
2478                 monitor the process from inside a poll() loop.
2479                 """
2480
2481                 if event & PollConstants.POLLIN:
2482
2483                         buf = array.array('B')
2484                         try:
2485                                 buf.fromfile(self._files.process, self._bufsize)
2486                         except EOFError:
2487                                 pass
2488
2489                         if buf:
2490                                 pass
2491                         else:
2492                                 self._unregister()
2493                                 self.wait()
2494
2495                 self._unregister_if_appropriate(event)
2496                 return self._registered
2497
2498 class MiscFunctionsProcess(SpawnProcess):
2499         """
2500         Spawns misc-functions.sh with an existing ebuild environment.
2501         """
2502
2503         __slots__ = ("commands", "phase", "pkg", "settings")
2504
2505         def _start(self):
2506                 settings = self.settings
2507                 settings.pop("EBUILD_PHASE", None)
2508                 portage_bin_path = settings["PORTAGE_BIN_PATH"]
2509                 misc_sh_binary = os.path.join(portage_bin_path,
2510                         os.path.basename(portage.const.MISC_SH_BINARY))
2511
2512                 self.args = [portage._shell_quote(misc_sh_binary)] + self.commands
2513                 self.logfile = settings.get("PORTAGE_LOG_FILE")
2514
2515                 portage._doebuild_exit_status_unlink(
2516                         settings.get("EBUILD_EXIT_STATUS_FILE"))
2517
2518                 SpawnProcess._start(self)
2519
2520         def _spawn(self, args, **kwargs):
2521                 settings = self.settings
2522                 debug = settings.get("PORTAGE_DEBUG") == "1"
2523                 return portage.spawn(" ".join(args), settings,
2524                         debug=debug, **kwargs)
2525
2526         def _set_returncode(self, wait_retval):
2527                 SpawnProcess._set_returncode(self, wait_retval)
2528                 self.returncode = portage._doebuild_exit_status_check_and_log(
2529                         self.settings, self.phase, self.returncode)
2530
2531 class EbuildFetcher(SpawnProcess):
2532
2533         __slots__ = ("config_pool", "fetchonly", "fetchall", "pkg", "prefetch") + \
2534                 ("_build_dir",)
2535
2536         def _start(self):
2537
2538                 root_config = self.pkg.root_config
2539                 portdb = root_config.trees["porttree"].dbapi
2540                 ebuild_path = portdb.findname(self.pkg.cpv)
2541                 settings = self.config_pool.allocate()
2542                 settings.setcpv(self.pkg)
2543
2544                 # In prefetch mode, logging goes to emerge-fetch.log and the builddir
2545                 # should not be touched since otherwise it could interfere with
2546                 # another instance of the same cpv concurrently being built for a
2547                 # different $ROOT (currently, builds only cooperate with prefetchers
2548                 # that are spawned for the same $ROOT).
2549                 if not self.prefetch:
2550                         self._build_dir = EbuildBuildDir(pkg=self.pkg, settings=settings)
2551                         self._build_dir.lock()
2552                         self._build_dir.clean_log()
2553                         portage.prepare_build_dirs(self.pkg.root, self._build_dir.settings, 0)
2554                         if self.logfile is None:
2555                                 self.logfile = settings.get("PORTAGE_LOG_FILE")
2556
2557                 phase = "fetch"
2558                 if self.fetchall:
2559                         phase = "fetchall"
2560
2561                 # If any incremental variables have been overridden
2562                 # via the environment, those values need to be passed
2563                 # along here so that they are correctly considered by
2564                 # the config instance in the subproccess.
2565                 fetch_env = os.environ.copy()
2566
2567                 nocolor = settings.get("NOCOLOR")
2568                 if nocolor is not None:
2569                         fetch_env["NOCOLOR"] = nocolor
2570
2571                 fetch_env["PORTAGE_NICENESS"] = "0"
2572                 if self.prefetch:
2573                         fetch_env["PORTAGE_PARALLEL_FETCHONLY"] = "1"
2574
2575                 ebuild_binary = os.path.join(
2576                         settings["PORTAGE_BIN_PATH"], "ebuild")
2577
2578                 fetch_args = [ebuild_binary, ebuild_path, phase]
2579                 debug = settings.get("PORTAGE_DEBUG") == "1"
2580                 if debug:
2581                         fetch_args.append("--debug")
2582
2583                 self.args = fetch_args
2584                 self.env = fetch_env
2585                 SpawnProcess._start(self)
2586
2587         def _pipe(self, fd_pipes):
2588                 """When appropriate, use a pty so that fetcher progress bars,
2589                 like wget has, will work properly."""
2590                 if self.background or not sys.stdout.isatty():
2591                         # When the output only goes to a log file,
2592                         # there's no point in creating a pty.
2593                         return os.pipe()
2594                 stdout_pipe = fd_pipes.get(1)
2595                 got_pty, master_fd, slave_fd = \
2596                         portage._create_pty_or_pipe(copy_term_size=stdout_pipe)
2597                 return (master_fd, slave_fd)
2598
2599         def _set_returncode(self, wait_retval):
2600                 SpawnProcess._set_returncode(self, wait_retval)
2601                 # Collect elog messages that might have been
2602                 # created by the pkg_nofetch phase.
2603                 if self._build_dir is not None:
2604                         # Skip elog messages for prefetch, in order to avoid duplicates.
2605                         if not self.prefetch and self.returncode != os.EX_OK:
2606                                 elog_out = None
2607                                 if self.logfile is not None:
2608                                         if self.background:
2609                                                 elog_out = open(self.logfile, 'a')
2610                                 msg = "Fetch failed for '%s'" % (self.pkg.cpv,)
2611                                 if self.logfile is not None:
2612                                         msg += ", Log file:"
2613                                 eerror(msg, phase="unpack", key=self.pkg.cpv, out=elog_out)
2614                                 if self.logfile is not None:
2615                                         eerror(" '%s'" % (self.logfile,),
2616                                                 phase="unpack", key=self.pkg.cpv, out=elog_out)
2617                                 if elog_out is not None:
2618                                         elog_out.close()
2619                         if not self.prefetch:
2620                                 portage.elog.elog_process(self.pkg.cpv, self._build_dir.settings)
2621                         features = self._build_dir.settings.features
2622                         if self.returncode == os.EX_OK:
2623                                 self._build_dir.clean_log()
2624                         self._build_dir.unlock()
2625                         self.config_pool.deallocate(self._build_dir.settings)
2626                         self._build_dir = None
2627
2628 class EbuildBuildDir(SlotObject):
2629
2630         __slots__ = ("dir_path", "pkg", "settings",
2631                 "locked", "_catdir", "_lock_obj")
2632
2633         def __init__(self, **kwargs):
2634                 SlotObject.__init__(self, **kwargs)
2635                 self.locked = False
2636
2637         def lock(self):
2638                 """
2639                 This raises an AlreadyLocked exception if lock() is called
2640                 while a lock is already held. In order to avoid this, call
2641                 unlock() or check whether the "locked" attribute is True
2642                 or False before calling lock().
2643                 """
2644                 if self._lock_obj is not None:
2645                         raise self.AlreadyLocked((self._lock_obj,))
2646
2647                 dir_path = self.dir_path
2648                 if dir_path is None:
2649                         root_config = self.pkg.root_config
2650                         portdb = root_config.trees["porttree"].dbapi
2651                         ebuild_path = portdb.findname(self.pkg.cpv)
2652                         settings = self.settings
2653                         settings.setcpv(self.pkg)
2654                         debug = settings.get("PORTAGE_DEBUG") == "1"
2655                         use_cache = 1 # always true
2656                         portage.doebuild_environment(ebuild_path, "setup", root_config.root,
2657                                 self.settings, debug, use_cache, portdb)
2658                         dir_path = self.settings["PORTAGE_BUILDDIR"]
2659
2660                 catdir = os.path.dirname(dir_path)
2661                 self._catdir = catdir
2662
2663                 portage.util.ensure_dirs(os.path.dirname(catdir),
2664                         gid=portage.portage_gid,
2665                         mode=070, mask=0)
2666                 catdir_lock = None
2667                 try:
2668                         catdir_lock = portage.locks.lockdir(catdir)
2669                         portage.util.ensure_dirs(catdir,
2670                                 gid=portage.portage_gid,
2671                                 mode=070, mask=0)
2672                         self._lock_obj = portage.locks.lockdir(dir_path)
2673                 finally:
2674                         self.locked = self._lock_obj is not None
2675                         if catdir_lock is not None:
2676                                 portage.locks.unlockdir(catdir_lock)
2677
2678         def clean_log(self):
2679                 """Discard existing log."""
2680                 settings = self.settings
2681
2682                 for x in ('.logid', 'temp/build.log'):
2683                         try:
2684                                 os.unlink(os.path.join(settings["PORTAGE_BUILDDIR"], x))
2685                         except OSError:
2686                                 pass
2687
2688         def unlock(self):
2689                 if self._lock_obj is None:
2690                         return
2691
2692                 portage.locks.unlockdir(self._lock_obj)
2693                 self._lock_obj = None
2694                 self.locked = False
2695
2696                 catdir = self._catdir
2697                 catdir_lock = None
2698                 try:
2699                         catdir_lock = portage.locks.lockdir(catdir)
2700                 finally:
2701                         if catdir_lock:
2702                                 try:
2703                                         os.rmdir(catdir)
2704                                 except OSError, e:
2705                                         if e.errno not in (errno.ENOENT,
2706                                                 errno.ENOTEMPTY, errno.EEXIST):
2707                                                 raise
2708                                         del e
2709                                 portage.locks.unlockdir(catdir_lock)
2710
2711         class AlreadyLocked(portage.exception.PortageException):
2712                 pass
2713
2714 class EbuildBuild(CompositeTask):
2715
2716         __slots__ = ("args_set", "config_pool", "find_blockers",
2717                 "ldpath_mtimes", "logger", "opts", "pkg", "pkg_count",
2718                 "prefetcher", "settings", "world_atom") + \
2719                 ("_build_dir", "_buildpkg", "_ebuild_path", "_issyspkg", "_tree")
2720
2721         def _start(self):
2722
2723                 logger = self.logger
2724                 opts = self.opts
2725                 pkg = self.pkg
2726                 settings = self.settings
2727                 world_atom = self.world_atom
2728                 root_config = pkg.root_config
2729                 tree = "porttree"
2730                 self._tree = tree
2731                 portdb = root_config.trees[tree].dbapi
2732                 settings.setcpv(pkg)
2733                 settings.configdict["pkg"]["EMERGE_FROM"] = pkg.type_name
2734                 ebuild_path = portdb.findname(self.pkg.cpv)
2735                 self._ebuild_path = ebuild_path
2736
2737                 prefetcher = self.prefetcher
2738                 if prefetcher is None:
2739                         pass
2740                 elif not prefetcher.isAlive():
2741                         prefetcher.cancel()
2742                 elif prefetcher.poll() is None:
2743
2744                         waiting_msg = "Fetching files " + \
2745                                 "in the background. " + \
2746                                 "To view fetch progress, run `tail -f " + \
2747                                 "/var/log/emerge-fetch.log` in another " + \
2748                                 "terminal."
2749                         msg_prefix = colorize("GOOD", " * ")
2750                         from textwrap import wrap
2751                         waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
2752                                 for line in wrap(waiting_msg, 65))
2753                         if not self.background:
2754                                 writemsg(waiting_msg, noiselevel=-1)
2755
2756                         self._current_task = prefetcher
2757                         prefetcher.addExitListener(self._prefetch_exit)
2758                         return
2759
2760                 self._prefetch_exit(prefetcher)
2761
2762         def _prefetch_exit(self, prefetcher):
2763
2764                 opts = self.opts
2765                 pkg = self.pkg
2766                 settings = self.settings
2767
2768                 if opts.fetchonly:
2769                                 fetcher = EbuildFetchonly(
2770                                         fetch_all=opts.fetch_all_uri,
2771                                         pkg=pkg, pretend=opts.pretend,
2772                                         settings=settings)
2773                                 retval = fetcher.execute()
2774                                 self.returncode = retval
2775                                 self.wait()
2776                                 return
2777
2778                 fetcher = EbuildFetcher(config_pool=self.config_pool,
2779                         fetchall=opts.fetch_all_uri,
2780                         fetchonly=opts.fetchonly,
2781                         background=self.background,
2782                         pkg=pkg, scheduler=self.scheduler)
2783
2784                 self._start_task(fetcher, self._fetch_exit)
2785
2786         def _fetch_exit(self, fetcher):
2787                 opts = self.opts
2788                 pkg = self.pkg
2789
2790                 fetch_failed = False
2791                 if opts.fetchonly:
2792                         fetch_failed = self._final_exit(fetcher) != os.EX_OK
2793                 else:
2794                         fetch_failed = self._default_exit(fetcher) != os.EX_OK
2795
2796                 if fetch_failed and fetcher.logfile is not None and \
2797                         os.path.exists(fetcher.logfile):
2798                         self.settings["PORTAGE_LOG_FILE"] = fetcher.logfile
2799
2800                 if not fetch_failed and fetcher.logfile is not None:
2801                         # Fetch was successful, so remove the fetch log.
2802                         try:
2803                                 os.unlink(fetcher.logfile)
2804                         except OSError:
2805                                 pass
2806
2807                 if fetch_failed or opts.fetchonly:
2808                         self.wait()
2809                         return
2810
2811                 logger = self.logger
2812                 opts = self.opts
2813                 pkg_count = self.pkg_count
2814                 scheduler = self.scheduler
2815                 settings = self.settings
2816                 features = settings.features
2817                 ebuild_path = self._ebuild_path
2818                 system_set = pkg.root_config.sets["system"]
2819
2820                 self._build_dir = EbuildBuildDir(pkg=pkg, settings=settings)
2821                 self._build_dir.lock()
2822
2823                 # Cleaning is triggered before the setup
2824                 # phase, in portage.doebuild().
2825                 msg = " === (%s of %s) Cleaning (%s::%s)" % \
2826                         (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2827                 short_msg = "emerge: (%s of %s) %s Clean" % \
2828                         (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2829                 logger.log(msg, short_msg=short_msg)
2830
2831                 #buildsyspkg: Check if we need to _force_ binary package creation
2832                 self._issyspkg = "buildsyspkg" in features and \
2833                                 system_set.findAtomForPackage(pkg) and \
2834                                 not opts.buildpkg
2835
2836                 if opts.buildpkg or self._issyspkg:
2837
2838                         self._buildpkg = True
2839
2840                         msg = " === (%s of %s) Compiling/Packaging (%s::%s)" % \
2841                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2842                         short_msg = "emerge: (%s of %s) %s Compile" % \
2843                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2844                         logger.log(msg, short_msg=short_msg)
2845
2846                 else:
2847                         msg = " === (%s of %s) Compiling/Merging (%s::%s)" % \
2848                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2849                         short_msg = "emerge: (%s of %s) %s Compile" % \
2850                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2851                         logger.log(msg, short_msg=short_msg)
2852
2853                 build = EbuildExecuter(background=self.background, pkg=pkg,
2854                         scheduler=scheduler, settings=settings)
2855                 self._start_task(build, self._build_exit)
2856
2857         def _unlock_builddir(self):
2858                 portage.elog.elog_process(self.pkg.cpv, self.settings)
2859                 self._build_dir.unlock()
2860
2861         def _build_exit(self, build):
2862                 if self._default_exit(build) != os.EX_OK:
2863                         self._unlock_builddir()
2864                         self.wait()
2865                         return
2866
2867                 opts = self.opts
2868                 buildpkg = self._buildpkg
2869
2870                 if not buildpkg:
2871                         self._final_exit(build)
2872                         self.wait()
2873                         return
2874
2875                 if self._issyspkg:
2876                         msg = ">>> This is a system package, " + \
2877                                 "let's pack a rescue tarball.\n"
2878
2879                         log_path = self.settings.get("PORTAGE_LOG_FILE")
2880                         if log_path is not None:
2881                                 log_file = open(log_path, 'a')
2882                                 try:
2883                                         log_file.write(msg)
2884                                 finally:
2885                                         log_file.close()
2886
2887                         if not self.background:
2888                                 portage.writemsg_stdout(msg, noiselevel=-1)
2889
2890                 packager = EbuildBinpkg(background=self.background, pkg=self.pkg,
2891                         scheduler=self.scheduler, settings=self.settings)
2892
2893                 self._start_task(packager, self._buildpkg_exit)
2894
2895         def _buildpkg_exit(self, packager):
2896                 """
2897                 Released build dir lock when there is a failure or
2898                 when in buildpkgonly mode. Otherwise, the lock will
2899                 be released when merge() is called.
2900                 """
2901
2902                 if self._default_exit(packager) != os.EX_OK:
2903                         self._unlock_builddir()
2904                         self.wait()
2905                         return
2906
2907                 if self.opts.buildpkgonly:
2908                         # Need to call "clean" phase for buildpkgonly mode
2909                         portage.elog.elog_process(self.pkg.cpv, self.settings)
2910                         phase = "clean"
2911                         clean_phase = EbuildPhase(background=self.background,
2912                                 pkg=self.pkg, phase=phase,
2913                                 scheduler=self.scheduler, settings=self.settings,
2914                                 tree=self._tree)
2915                         self._start_task(clean_phase, self._clean_exit)
2916                         return
2917
2918                 # Continue holding the builddir lock until
2919                 # after the package has been installed.
2920                 self._current_task = None
2921                 self.returncode = packager.returncode
2922                 self.wait()
2923
2924         def _clean_exit(self, clean_phase):
2925                 if self._final_exit(clean_phase) != os.EX_OK or \
2926                         self.opts.buildpkgonly:
2927                         self._unlock_builddir()
2928                 self.wait()
2929
2930         def install(self):
2931                 """
2932                 Install the package and then clean up and release locks.
2933                 Only call this after the build has completed successfully
2934                 and neither fetchonly nor buildpkgonly mode are enabled.
2935                 """
2936
2937                 find_blockers = self.find_blockers
2938                 ldpath_mtimes = self.ldpath_mtimes
2939                 logger = self.logger
2940                 pkg = self.pkg
2941                 pkg_count = self.pkg_count
2942                 settings = self.settings
2943                 world_atom = self.world_atom
2944                 ebuild_path = self._ebuild_path
2945                 tree = self._tree
2946
2947                 merge = EbuildMerge(find_blockers=self.find_blockers,
2948                         ldpath_mtimes=ldpath_mtimes, logger=logger, pkg=pkg,
2949                         pkg_count=pkg_count, pkg_path=ebuild_path,
2950                         scheduler=self.scheduler,
2951                         settings=settings, tree=tree, world_atom=world_atom)
2952
2953                 msg = " === (%s of %s) Merging (%s::%s)" % \
2954                         (pkg_count.curval, pkg_count.maxval,
2955                         pkg.cpv, ebuild_path)
2956                 short_msg = "emerge: (%s of %s) %s Merge" % \
2957                         (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2958                 logger.log(msg, short_msg=short_msg)
2959
2960                 try:
2961                         rval = merge.execute()
2962                 finally:
2963                         self._unlock_builddir()
2964
2965                 return rval
2966
2967 class EbuildExecuter(CompositeTask):
2968
2969         __slots__ = ("pkg", "scheduler", "settings") + ("_tree",)
2970
2971         _phases = ("prepare", "configure", "compile", "test", "install")
2972
2973         _live_eclasses = frozenset([
2974                 "bzr",
2975                 "cvs",
2976                 "darcs",
2977                 "git",
2978                 "mercurial",
2979                 "subversion"
2980         ])
2981
2982         def _start(self):
2983                 self._tree = "porttree"
2984                 pkg = self.pkg
2985                 phase = "clean"
2986                 clean_phase = EbuildPhase(background=self.background, pkg=pkg, phase=phase,
2987                         scheduler=self.scheduler, settings=self.settings, tree=self._tree)
2988                 self._start_task(clean_phase, self._clean_phase_exit)
2989
2990         def _clean_phase_exit(self, clean_phase):
2991
2992                 if self._default_exit(clean_phase) != os.EX_OK:
2993                         self.wait()
2994                         return
2995
2996                 pkg = self.pkg
2997                 scheduler = self.scheduler
2998                 settings = self.settings
2999                 cleanup = 1
3000
3001                 # This initializes PORTAGE_LOG_FILE.
3002                 portage.prepare_build_dirs(pkg.root, settings, cleanup)
3003
3004                 setup_phase = EbuildPhase(background=self.background,
3005                         pkg=pkg, phase="setup", scheduler=scheduler,
3006                         settings=settings, tree=self._tree)
3007
3008                 setup_phase.addExitListener(self._setup_exit)
3009                 self._current_task = setup_phase
3010                 self.scheduler.scheduleSetup(setup_phase)
3011
3012         def _setup_exit(self, setup_phase):
3013
3014                 if self._default_exit(setup_phase) != os.EX_OK:
3015                         self.wait()
3016                         return
3017
3018                 unpack_phase = EbuildPhase(background=self.background,
3019                         pkg=self.pkg, phase="unpack", scheduler=self.scheduler,
3020                         settings=self.settings, tree=self._tree)
3021
3022                 if self._live_eclasses.intersection(self.pkg.inherited):
3023                         # Serialize $DISTDIR access for live ebuilds since
3024                         # otherwise they can interfere with eachother.
3025
3026                         unpack_phase.addExitListener(self._unpack_exit)
3027                         self._current_task = unpack_phase
3028                         self.scheduler.scheduleUnpack(unpack_phase)
3029
3030                 else:
3031                         self._start_task(unpack_phase, self._unpack_exit)
3032
3033         def _unpack_exit(self, unpack_phase):
3034
3035                 if self._default_exit(unpack_phase) != os.EX_OK:
3036                         self.wait()
3037                         return
3038
3039                 ebuild_phases = TaskSequence(scheduler=self.scheduler)
3040
3041                 pkg = self.pkg
3042                 phases = self._phases
3043                 eapi = pkg.metadata["EAPI"]
3044                 if eapi in ("0", "1"):
3045                         # skip src_prepare and src_configure
3046                         phases = phases[2:]
3047
3048                 for phase in phases:
3049                         ebuild_phases.add(EbuildPhase(background=self.background,
3050                                 pkg=self.pkg, phase=phase, scheduler=self.scheduler,
3051                                 settings=self.settings, tree=self._tree))
3052
3053                 self._start_task(ebuild_phases, self._default_final_exit)
3054
3055 class EbuildMetadataPhase(SubProcess):
3056
3057         """
3058         Asynchronous interface for the ebuild "depend" phase which is
3059         used to extract metadata from the ebuild.
3060         """
3061
3062         __slots__ = ("cpv", "ebuild_path", "fd_pipes", "metadata_callback",
3063                 "ebuild_mtime", "metadata", "portdb", "repo_path", "settings") + \
3064                 ("_raw_metadata",)
3065
3066         _file_names = ("ebuild",)
3067         _files_dict = slot_dict_class(_file_names, prefix="")
3068         _metadata_fd = 9
3069
3070         def _start(self):
3071                 settings = self.settings
3072                 settings.setcpv(self.cpv)
3073                 ebuild_path = self.ebuild_path
3074
3075                 eapi = None
3076                 if 'parse-eapi-glep-55' in settings.features:
3077                         pf, eapi = portage._split_ebuild_name_glep55(
3078                                 os.path.basename(ebuild_path))
3079                 if eapi is None and \
3080                         'parse-eapi-ebuild-head' in settings.features:
3081                         eapi = portage._parse_eapi_ebuild_head(codecs.open(ebuild_path,
3082                                 mode='r', encoding='utf_8', errors='replace'))
3083
3084                 if eapi is not None:
3085                         if not portage.eapi_is_supported(eapi):
3086                                 self.metadata_callback(self.cpv, self.ebuild_path,
3087                                         self.repo_path, {'EAPI' : eapi}, self.ebuild_mtime)
3088                                 self.returncode = os.EX_OK
3089                                 self.wait()
3090                                 return
3091
3092                         settings.configdict['pkg']['EAPI'] = eapi
3093
3094                 debug = settings.get("PORTAGE_DEBUG") == "1"
3095                 master_fd = None
3096                 slave_fd = None
3097                 fd_pipes = None
3098                 if self.fd_pipes is not None:
3099                         fd_pipes = self.fd_pipes.copy()
3100                 else:
3101                         fd_pipes = {}
3102
3103                 fd_pipes.setdefault(0, sys.stdin.fileno())
3104                 fd_pipes.setdefault(1, sys.stdout.fileno())
3105                 fd_pipes.setdefault(2, sys.stderr.fileno())
3106
3107                 # flush any pending output
3108                 for fd in fd_pipes.itervalues():
3109                         if fd == sys.stdout.fileno():
3110                                 sys.stdout.flush()
3111                         if fd == sys.stderr.fileno():
3112                                 sys.stderr.flush()
3113
3114                 fd_pipes_orig = fd_pipes.copy()
3115                 self._files = self._files_dict()
3116                 files = self._files
3117
3118                 master_fd, slave_fd = os.pipe()
3119                 fcntl.fcntl(master_fd, fcntl.F_SETFL,
3120                         fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
3121
3122                 fd_pipes[self._metadata_fd] = slave_fd
3123
3124                 self._raw_metadata = []
3125                 files.ebuild = os.fdopen(master_fd, 'r')
3126                 self._reg_id = self.scheduler.register(files.ebuild.fileno(),
3127                         self._registered_events, self._output_handler)
3128                 self._registered = True
3129
3130                 retval = portage.doebuild(ebuild_path, "depend",
3131                         settings["ROOT"], settings, debug,
3132                         mydbapi=self.portdb, tree="porttree",
3133                         fd_pipes=fd_pipes, returnpid=True)
3134
3135                 os.close(slave_fd)
3136
3137                 if isinstance(retval, int):
3138                         # doebuild failed before spawning
3139                         self._unregister()
3140                         self.returncode = retval
3141                         self.wait()
3142                         return
3143
3144                 self.pid = retval[0]
3145                 portage.process.spawned_pids.remove(self.pid)
3146
3147         def _output_handler(self, fd, event):
3148
3149                 if event & PollConstants.POLLIN:
3150                         self._raw_metadata.append(self._files.ebuild.read())
3151                         if not self._raw_metadata[-1]:
3152                                 self._unregister()
3153                                 self.wait()
3154
3155                 self._unregister_if_appropriate(event)
3156                 return self._registered
3157
3158         def _set_returncode(self, wait_retval):
3159                 SubProcess._set_returncode(self, wait_retval)
3160                 if self.returncode == os.EX_OK:
3161                         metadata_lines = "".join(self._raw_metadata).splitlines()
3162                         if len(portage.auxdbkeys) != len(metadata_lines):
3163                                 # Don't trust bash's returncode if the
3164                                 # number of lines is incorrect.
3165                                 self.returncode = 1
3166                         else:
3167                                 metadata = izip(portage.auxdbkeys, metadata_lines)
3168                                 self.metadata = self.metadata_callback(self.cpv,
3169                                         self.ebuild_path, self.repo_path, metadata,
3170                                         self.ebuild_mtime)
3171
3172 class EbuildProcess(SpawnProcess):
3173
3174         __slots__ = ("phase", "pkg", "settings", "tree")
3175
3176         def _start(self):
3177                 # Don't open the log file during the clean phase since the
3178                 # open file can result in an nfs lock on $T/build.log which
3179                 # prevents the clean phase from removing $T.
3180                 if self.phase not in ("clean", "cleanrm"):
3181                         self.logfile = self.settings.get("PORTAGE_LOG_FILE")
3182                 SpawnProcess._start(self)
3183
3184         def _pipe(self, fd_pipes):
3185                 stdout_pipe = fd_pipes.get(1)
3186                 got_pty, master_fd, slave_fd = \
3187                         portage._create_pty_or_pipe(copy_term_size=stdout_pipe)
3188                 return (master_fd, slave_fd)
3189
3190         def _spawn(self, args, **kwargs):
3191
3192                 root_config = self.pkg.root_config
3193                 tree = self.tree
3194                 mydbapi = root_config.trees[tree].dbapi
3195                 settings = self.settings
3196                 ebuild_path = settings["EBUILD"]
3197                 debug = settings.get("PORTAGE_DEBUG") == "1"
3198
3199                 rval = portage.doebuild(ebuild_path, self.phase,
3200                         root_config.root, settings, debug,
3201                         mydbapi=mydbapi, tree=tree, **kwargs)
3202
3203                 return rval
3204
3205         def _set_returncode(self, wait_retval):
3206                 SpawnProcess._set_returncode(self, wait_retval)
3207
3208                 if self.phase not in ("clean", "cleanrm"):
3209                         self.returncode = portage._doebuild_exit_status_check_and_log(
3210                                 self.settings, self.phase, self.returncode)
3211
3212                 if self.phase == "test" and self.returncode != os.EX_OK and \
3213                         "test-fail-continue" in self.settings.features:
3214                         self.returncode = os.EX_OK
3215
3216                 portage._post_phase_userpriv_perms(self.settings)
3217
3218 class EbuildPhase(CompositeTask):
3219
3220         __slots__ = ("background", "pkg", "phase",
3221                 "scheduler", "settings", "tree")
3222
3223         _post_phase_cmds = portage._post_phase_cmds
3224
3225         def _start(self):
3226
3227                 ebuild_process = EbuildProcess(background=self.background,
3228                         pkg=self.pkg, phase=self.phase, scheduler=self.scheduler,
3229                         settings=self.settings, tree=self.tree)
3230
3231                 self._start_task(ebuild_process, self._ebuild_exit)
3232
3233         def _ebuild_exit(self, ebuild_process):
3234
3235                 if self.phase == "install":
3236                         out = None
3237                         log_path = self.settings.get("PORTAGE_LOG_FILE")
3238                         log_file = None
3239                         if self.background and log_path is not None:
3240                                 log_file = open(log_path, 'a')
3241                                 out = log_file
3242                         try:
3243                                 portage._check_build_log(self.settings, out=out)
3244                         finally:
3245                                 if log_file is not None:
3246                                         log_file.close()
3247
3248                 if self._default_exit(ebuild_process) != os.EX_OK:
3249                         self.wait()
3250                         return
3251
3252                 settings = self.settings
3253
3254                 if self.phase == "install":
3255                         portage._post_src_install_chost_fix(settings)
3256                         portage._post_src_install_uid_fix(settings)
3257
3258                 post_phase_cmds = self._post_phase_cmds.get(self.phase)
3259                 if post_phase_cmds is not None:
3260                         post_phase = MiscFunctionsProcess(background=self.background,
3261                                 commands=post_phase_cmds, phase=self.phase, pkg=self.pkg,
3262                                 scheduler=self.scheduler, settings=settings)
3263                         self._start_task(post_phase, self._post_phase_exit)
3264                         return
3265
3266                 self.returncode = ebuild_process.returncode
3267                 self._current_task = None
3268                 self.wait()
3269
3270         def _post_phase_exit(self, post_phase):
3271                 if self._final_exit(post_phase) != os.EX_OK:
3272                         writemsg("!!! post %s failed; exiting.\n" % self.phase,
3273                                 noiselevel=-1)
3274                 self._current_task = None
3275                 self.wait()
3276                 return
3277
3278 class EbuildBinpkg(EbuildProcess):
3279         """
3280         This assumes that src_install() has successfully completed.
3281         """
3282         __slots__ = ("_binpkg_tmpfile",)
3283
3284         def _start(self):
3285                 self.phase = "package"
3286                 self.tree = "porttree"
3287                 pkg = self.pkg
3288                 root_config = pkg.root_config
3289                 portdb = root_config.trees["porttree"].dbapi
3290                 bintree = root_config.trees["bintree"]
3291                 ebuild_path = portdb.findname(self.pkg.cpv)
3292                 settings = self.settings
3293                 debug = settings.get("PORTAGE_DEBUG") == "1"
3294
3295                 bintree.prevent_collision(pkg.cpv)
3296                 binpkg_tmpfile = os.path.join(bintree.pkgdir,
3297                         pkg.cpv + ".tbz2." + str(os.getpid()))
3298                 self._binpkg_tmpfile = binpkg_tmpfile
3299                 settings["PORTAGE_BINPKG_TMPFILE"] = binpkg_tmpfile
3300                 settings.backup_changes("PORTAGE_BINPKG_TMPFILE")
3301
3302                 try:
3303                         EbuildProcess._start(self)
3304                 finally:
3305                         settings.pop("PORTAGE_BINPKG_TMPFILE", None)
3306
3307         def _set_returncode(self, wait_retval):
3308                 EbuildProcess._set_returncode(self, wait_retval)
3309
3310                 pkg = self.pkg
3311                 bintree = pkg.root_config.trees["bintree"]
3312                 binpkg_tmpfile = self._binpkg_tmpfile
3313                 if self.returncode == os.EX_OK:
3314                         bintree.inject(pkg.cpv, filename=binpkg_tmpfile)
3315
3316 class EbuildMerge(SlotObject):
3317
3318         __slots__ = ("find_blockers", "logger", "ldpath_mtimes",
3319                 "pkg", "pkg_count", "pkg_path", "pretend",
3320                 "scheduler", "settings", "tree", "world_atom")
3321
3322         def execute(self):
3323                 root_config = self.pkg.root_config
3324                 settings = self.settings
3325                 retval = portage.merge(settings["CATEGORY"],
3326                         settings["PF"], settings["D"],
3327                         os.path.join(settings["PORTAGE_BUILDDIR"],
3328                         "build-info"), root_config.root, settings,
3329                         myebuild=settings["EBUILD"],
3330                         mytree=self.tree, mydbapi=root_config.trees[self.tree].dbapi,
3331                         vartree=root_config.trees["vartree"],
3332                         prev_mtimes=self.ldpath_mtimes,
3333                         scheduler=self.scheduler,
3334                         blockers=self.find_blockers)
3335
3336                 if retval == os.EX_OK:
3337                         self.world_atom(self.pkg)
3338                         self._log_success()
3339
3340                 return retval
3341
3342         def _log_success(self):
3343                 pkg = self.pkg
3344                 pkg_count = self.pkg_count
3345                 pkg_path = self.pkg_path
3346                 logger = self.logger
3347                 if "noclean" not in self.settings.features:
3348                         short_msg = "emerge: (%s of %s) %s Clean Post" % \
3349                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3350                         logger.log((" === (%s of %s) " + \
3351                                 "Post-Build Cleaning (%s::%s)") % \
3352                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path),
3353                                 short_msg=short_msg)
3354                 logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
3355                         (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
3356
3357 class PackageUninstall(AsynchronousTask):
3358
3359         __slots__ = ("ldpath_mtimes", "opts", "pkg", "scheduler", "settings")
3360
3361         def _start(self):
3362                 try:
3363                         unmerge(self.pkg.root_config, self.opts, "unmerge",
3364                                 [self.pkg.cpv], self.ldpath_mtimes, clean_world=0,
3365                                 clean_delay=0, raise_on_error=1, scheduler=self.scheduler,
3366                                 writemsg_level=self._writemsg_level)
3367                 except UninstallFailure, e:
3368                         self.returncode = e.status
3369                 else:
3370                         self.returncode = os.EX_OK
3371                 self.wait()
3372
3373         def _writemsg_level(self, msg, level=0, noiselevel=0):
3374
3375                 log_path = self.settings.get("PORTAGE_LOG_FILE")
3376                 background = self.background
3377
3378                 if log_path is None:
3379                         if not (background and level < logging.WARNING):
3380                                 portage.util.writemsg_level(msg,
3381                                         level=level, noiselevel=noiselevel)
3382                 else:
3383                         if not background:
3384                                 portage.util.writemsg_level(msg,
3385                                         level=level, noiselevel=noiselevel)
3386
3387                         f = open(log_path, 'a')
3388                         try:
3389                                 f.write(msg)
3390                         finally:
3391                                 f.close()
3392
3393 class Binpkg(CompositeTask):
3394
3395         __slots__ = ("find_blockers",
3396                 "ldpath_mtimes", "logger", "opts",
3397                 "pkg", "pkg_count", "prefetcher", "settings", "world_atom") + \
3398                 ("_bintree", "_build_dir", "_ebuild_path", "_fetched_pkg",
3399                 "_image_dir", "_infloc", "_pkg_path", "_tree", "_verify")
3400
3401         def _writemsg_level(self, msg, level=0, noiselevel=0):
3402
3403                 if not self.background:
3404                         portage.util.writemsg_level(msg,
3405                                 level=level, noiselevel=noiselevel)
3406
3407                 log_path = self.settings.get("PORTAGE_LOG_FILE")
3408                 if  log_path is not None:
3409                         f = open(log_path, 'a')
3410                         try:
3411                                 f.write(msg)
3412                         finally:
3413                                 f.close()
3414
3415         def _start(self):
3416
3417                 pkg = self.pkg
3418                 settings = self.settings
3419                 settings.setcpv(pkg)
3420                 self._tree = "bintree"
3421                 self._bintree = self.pkg.root_config.trees[self._tree]
3422                 self._verify = not self.opts.pretend
3423
3424                 dir_path = os.path.join(settings["PORTAGE_TMPDIR"],
3425                         "portage", pkg.category, pkg.pf)
3426                 self._build_dir = EbuildBuildDir(dir_path=dir_path,
3427                         pkg=pkg, settings=settings)
3428                 self._image_dir = os.path.join(dir_path, "image")
3429                 self._infloc = os.path.join(dir_path, "build-info")
3430                 self._ebuild_path = os.path.join(self._infloc, pkg.pf + ".ebuild")
3431                 settings["EBUILD"] = self._ebuild_path
3432                 debug = settings.get("PORTAGE_DEBUG") == "1"
3433                 portage.doebuild_environment(self._ebuild_path, "setup",
3434                         settings["ROOT"], settings, debug, 1, self._bintree.dbapi)
3435                 settings.configdict["pkg"]["EMERGE_FROM"] = pkg.type_name
3436
3437                 # The prefetcher has already completed or it
3438                 # could be running now. If it's running now,
3439                 # wait for it to complete since it holds
3440                 # a lock on the file being fetched. The
3441                 # portage.locks functions are only designed
3442                 # to work between separate processes. Since
3443                 # the lock is held by the current process,
3444                 # use the scheduler and fetcher methods to
3445                 # synchronize with the fetcher.
3446                 prefetcher = self.prefetcher
3447                 if prefetcher is None:
3448                         pass
3449                 elif not prefetcher.isAlive():
3450                         prefetcher.cancel()
3451                 elif prefetcher.poll() is None:
3452
3453                         waiting_msg = ("Fetching '%s' " + \
3454                                 "in the background. " + \
3455                                 "To view fetch progress, run `tail -f " + \
3456                                 "/var/log/emerge-fetch.log` in another " + \
3457                                 "terminal.") % prefetcher.pkg_path
3458                         msg_prefix = colorize("GOOD", " * ")
3459                         from textwrap import wrap
3460                         waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
3461                                 for line in wrap(waiting_msg, 65))
3462                         if not self.background:
3463                                 writemsg(waiting_msg, noiselevel=-1)
3464
3465                         self._current_task = prefetcher
3466                         prefetcher.addExitListener(self._prefetch_exit)
3467                         return
3468
3469                 self._prefetch_exit(prefetcher)
3470
3471         def _prefetch_exit(self, prefetcher):
3472
3473                 pkg = self.pkg
3474                 pkg_count = self.pkg_count
3475                 if not (self.opts.pretend or self.opts.fetchonly):
3476                         self._build_dir.lock()
3477                         # If necessary, discard old log so that we don't
3478                         # append to it.
3479                         self._build_dir.clean_log()
3480                         # Initialze PORTAGE_LOG_FILE.
3481                         portage.prepare_build_dirs(self.settings["ROOT"], self.settings, 1)
3482                 fetcher = BinpkgFetcher(background=self.background,
3483                         logfile=self.settings.get("PORTAGE_LOG_FILE"), pkg=self.pkg,
3484                         pretend=self.opts.pretend, scheduler=self.scheduler)
3485                 pkg_path = fetcher.pkg_path
3486                 self._pkg_path = pkg_path
3487
3488                 if self.opts.getbinpkg and self._bintree.isremote(pkg.cpv):
3489
3490                         msg = " --- (%s of %s) Fetching Binary (%s::%s)" %\
3491                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
3492                         short_msg = "emerge: (%s of %s) %s Fetch" % \
3493                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3494                         self.logger.log(msg, short_msg=short_msg)
3495                         self._start_task(fetcher, self._fetcher_exit)
3496                         return
3497
3498                 self._fetcher_exit(fetcher)
3499
3500         def _fetcher_exit(self, fetcher):
3501
3502                 # The fetcher only has a returncode when
3503                 # --getbinpkg is enabled.
3504                 if fetcher.returncode is not None:
3505                         self._fetched_pkg = True
3506                         if self._default_exit(fetcher) != os.EX_OK:
3507                                 self._unlock_builddir()
3508                                 self.wait()
3509                                 return
3510
3511                 if self.opts.pretend:
3512                         self._current_task = None
3513                         self.returncode = os.EX_OK
3514                         self.wait()
3515                         return
3516
3517                 verifier = None
3518                 if self._verify:
3519                         logfile = None
3520                         if self.background:
3521                                 logfile = self.settings.get("PORTAGE_LOG_FILE")
3522                         verifier = BinpkgVerifier(background=self.background,
3523                                 logfile=logfile, pkg=self.pkg)
3524                         self._start_task(verifier, self._verifier_exit)
3525                         return
3526
3527                 self._verifier_exit(verifier)
3528
3529         def _verifier_exit(self, verifier):
3530                 if verifier is not None and \
3531                         self._default_exit(verifier) != os.EX_OK:
3532                         self._unlock_builddir()
3533                         self.wait()
3534                         return
3535
3536                 logger = self.logger
3537                 pkg = self.pkg
3538                 pkg_count = self.pkg_count
3539                 pkg_path = self._pkg_path
3540
3541                 if self._fetched_pkg:
3542                         self._bintree.inject(pkg.cpv, filename=pkg_path)
3543
3544                 if self.opts.fetchonly:
3545                         self._current_task = None
3546                         self.returncode = os.EX_OK
3547                         self.wait()
3548                         return
3549
3550                 msg = " === (%s of %s) Merging Binary (%s::%s)" % \
3551                         (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
3552                 short_msg = "emerge: (%s of %s) %s Merge Binary" % \
3553                         (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3554                 logger.log(msg, short_msg=short_msg)
3555
3556                 phase = "clean"
3557                 settings = self.settings
3558                 ebuild_phase = EbuildPhase(background=self.background,
3559                         pkg=pkg, phase=phase, scheduler=self.scheduler,
3560                         settings=settings, tree=self._tree)
3561
3562                 self._start_task(ebuild_phase, self._clean_exit)
3563
3564         def _clean_exit(self, clean_phase):
3565                 if self._default_exit(clean_phase) != os.EX_OK:
3566                         self._unlock_builddir()
3567                         self.wait()
3568                         return
3569
3570                 dir_path = self._build_dir.dir_path
3571
3572                 infloc = self._infloc
3573                 pkg = self.pkg
3574                 pkg_path = self._pkg_path
3575
3576                 dir_mode = 0755
3577                 for mydir in (dir_path, self._image_dir, infloc):
3578                         portage.util.ensure_dirs(mydir, uid=portage.data.portage_uid,
3579                                 gid=portage.data.portage_gid, mode=dir_mode)
3580
3581                 # This initializes PORTAGE_LOG_FILE.
3582                 portage.prepare_build_dirs(self.settings["ROOT"], self.settings, 1)
3583                 self._writemsg_level(">>> Extracting info\n")
3584
3585                 pkg_xpak = portage.xpak.tbz2(self._pkg_path)
3586                 check_missing_metadata = ("CATEGORY", "PF")
3587                 missing_metadata = set()
3588                 for k in check_missing_metadata:
3589                         v = pkg_xpak.getfile(k)
3590                         if not v:
3591                                 missing_metadata.add(k)
3592
3593                 pkg_xpak.unpackinfo(infloc)
3594                 for k in missing_metadata:
3595                         if k == "CATEGORY":
3596                                 v = pkg.category
3597                         elif k == "PF":
3598                                 v = pkg.pf
3599                         else:
3600                                 continue
3601
3602                         f = open(os.path.join(infloc, k), 'wb')
3603                         try:
3604                                 f.write(v + "\n")
3605                         finally:
3606                                 f.close()
3607
3608                 # Store the md5sum in the vdb.
3609                 f = open(os.path.join(infloc, "BINPKGMD5"), "w")
3610                 try:
3611                         f.write(str(portage.checksum.perform_md5(pkg_path)) + "\n")
3612                 finally:
3613                         f.close()
3614
3615                 # This gives bashrc users an opportunity to do various things
3616                 # such as remove binary packages after they're installed.
3617                 settings = self.settings
3618                 settings.setcpv(self.pkg)
3619                 settings["PORTAGE_BINPKG_FILE"] = pkg_path
3620                 settings.backup_changes("PORTAGE_BINPKG_FILE")
3621
3622                 phase = "setup"
3623                 setup_phase = EbuildPhase(background=self.background,
3624                         pkg=self.pkg, phase=phase, scheduler=self.scheduler,
3625                         settings=settings, tree=self._tree)
3626
3627                 setup_phase.addExitListener(self._setup_exit)
3628                 self._current_task = setup_phase
3629                 self.scheduler.scheduleSetup(setup_phase)
3630
3631         def _setup_exit(self, setup_phase):
3632                 if self._default_exit(setup_phase) != os.EX_OK:
3633                         self._unlock_builddir()
3634                         self.wait()
3635                         return
3636
3637                 extractor = BinpkgExtractorAsync(background=self.background,
3638                         image_dir=self._image_dir,
3639                         pkg=self.pkg, pkg_path=self._pkg_path, scheduler=self.scheduler)
3640                 self._writemsg_level(">>> Extracting %s\n" % self.pkg.cpv)
3641                 self._start_task(extractor, self._extractor_exit)
3642
3643         def _extractor_exit(self, extractor):
3644                 if self._final_exit(extractor) != os.EX_OK:
3645                         self._unlock_builddir()
3646                         writemsg("!!! Error Extracting '%s'\n" % self._pkg_path,
3647                                 noiselevel=-1)
3648                 self.wait()
3649
3650         def _unlock_builddir(self):
3651                 if self.opts.pretend or self.opts.fetchonly:
3652                         return
3653                 portage.elog.elog_process(self.pkg.cpv, self.settings)
3654                 self._build_dir.unlock()
3655
3656         def install(self):
3657
3658                 # This gives bashrc users an opportunity to do various things
3659                 # such as remove binary packages after they're installed.
3660                 settings = self.settings
3661                 settings["PORTAGE_BINPKG_FILE"] = self._pkg_path
3662                 settings.backup_changes("PORTAGE_BINPKG_FILE")
3663
3664                 merge = EbuildMerge(find_blockers=self.find_blockers,
3665                         ldpath_mtimes=self.ldpath_mtimes, logger=self.logger,
3666                         pkg=self.pkg, pkg_count=self.pkg_count,
3667                         pkg_path=self._pkg_path, scheduler=self.scheduler,
3668                         settings=settings, tree=self._tree, world_atom=self.world_atom)
3669
3670                 try:
3671                         retval = merge.execute()
3672                 finally:
3673                         settings.pop("PORTAGE_BINPKG_FILE", None)
3674                         self._unlock_builddir()
3675                 return retval
3676
3677 class BinpkgFetcher(SpawnProcess):
3678
3679         __slots__ = ("pkg", "pretend",
3680                 "locked", "pkg_path", "_lock_obj")
3681
3682         def __init__(self, **kwargs):
3683                 SpawnProcess.__init__(self, **kwargs)
3684                 pkg = self.pkg
3685                 self.pkg_path = pkg.root_config.trees["bintree"].getname(pkg.cpv)
3686
3687         def _start(self):
3688
3689                 if self.cancelled:
3690                         return
3691
3692                 pkg = self.pkg
3693                 pretend = self.pretend
3694                 bintree = pkg.root_config.trees["bintree"]
3695                 settings = bintree.settings
3696                 use_locks = "distlocks" in settings.features
3697                 pkg_path = self.pkg_path
3698
3699                 if not pretend:
3700                         portage.util.ensure_dirs(os.path.dirname(pkg_path))
3701                         if use_locks:
3702                                 self.lock()
3703                 exists = os.path.exists(pkg_path)
3704                 resume = exists and os.path.basename(pkg_path) in bintree.invalids
3705                 if not (pretend or resume):
3706                         # Remove existing file or broken symlink.
3707                         try:
3708                                 os.unlink(pkg_path)
3709                         except OSError:
3710                                 pass
3711
3712                 # urljoin doesn't work correctly with
3713                 # unrecognized protocols like sftp
3714                 if bintree._remote_has_index:
3715                         rel_uri = bintree._remotepkgs[pkg.cpv].get("PATH")
3716                         if not rel_uri:
3717                                 rel_uri = pkg.cpv + ".tbz2"
3718                         uri = bintree._remote_base_uri.rstrip("/") + \
3719                                 "/" + rel_uri.lstrip("/")
3720                 else:
3721                         uri = settings["PORTAGE_BINHOST"].rstrip("/") + \
3722                                 "/" + pkg.pf + ".tbz2"
3723
3724                 if pretend:
3725                         portage.writemsg_stdout("\n%s\n" % uri, noiselevel=-1)
3726                         self.returncode = os.EX_OK
3727                         self.wait()
3728                         return
3729
3730                 protocol = urlparse.urlparse(uri)[0]
3731                 fcmd_prefix = "FETCHCOMMAND"
3732                 if resume:
3733                         fcmd_prefix = "RESUMECOMMAND"
3734                 fcmd = settings.get(fcmd_prefix + "_" + protocol.upper())
3735                 if not fcmd:
3736                         fcmd = settings.get(fcmd_prefix)
3737
3738                 fcmd_vars = {
3739                         "DISTDIR" : os.path.dirname(pkg_path),
3740                         "URI"     : uri,
3741                         "FILE"    : os.path.basename(pkg_path)
3742                 }
3743
3744                 fetch_env = dict(settings.iteritems())
3745                 fetch_args = [portage.util.varexpand(x, mydict=fcmd_vars) \
3746                         for x in shlex.split(fcmd)]
3747
3748                 if self.fd_pipes is None:
3749                         self.fd_pipes = {}
3750                 fd_pipes = self.fd_pipes
3751
3752                 # Redirect all output to stdout since some fetchers like
3753                 # wget pollute stderr (if portage detects a problem then it
3754                 # can send it's own message to stderr).
3755                 fd_pipes.setdefault(0, sys.stdin.fileno())
3756                 fd_pipes.setdefault(1, sys.stdout.fileno())
3757                 fd_pipes.setdefault(2, sys.stdout.fileno())
3758
3759                 self.args = fetch_args
3760                 self.env = fetch_env
3761                 SpawnProcess._start(self)
3762
3763         def _set_returncode(self, wait_retval):
3764                 SpawnProcess._set_returncode(self, wait_retval)
3765                 if self.returncode == os.EX_OK:
3766                         # If possible, update the mtime to match the remote package if
3767                         # the fetcher didn't already do it automatically.
3768                         bintree = self.pkg.root_config.trees["bintree"]
3769                         if bintree._remote_has_index:
3770                                 remote_mtime = bintree._remotepkgs[self.pkg.cpv].get("MTIME")
3771                                 if remote_mtime is not None:
3772                                         try:
3773                                                 remote_mtime = long(remote_mtime)
3774                                         except ValueError:
3775                                                 pass
3776                                         else:
3777                                                 try:
3778                                                         local_mtime = long(os.stat(self.pkg_path).st_mtime)
3779                                                 except OSError:
3780                                                         pass
3781                                                 else:
3782                                                         if remote_mtime != local_mtime:
3783                                                                 try:
3784                                                                         os.utime(self.pkg_path,
3785                                                                                 (remote_mtime, remote_mtime))
3786                                                                 except OSError:
3787                                                                         pass
3788
3789                 if self.locked:
3790                         self.unlock()
3791
3792         def lock(self):
3793                 """
3794                 This raises an AlreadyLocked exception if lock() is called
3795                 while a lock is already held. In order to avoid this, call
3796                 unlock() or check whether the "locked" attribute is True
3797                 or False before calling lock().
3798                 """
3799                 if self._lock_obj is not None:
3800                         raise self.AlreadyLocked((self._lock_obj,))
3801
3802                 self._lock_obj = portage.locks.lockfile(
3803                         self.pkg_path, wantnewlockfile=1)
3804                 self.locked = True
3805
3806         class AlreadyLocked(portage.exception.PortageException):
3807                 pass
3808
3809         def unlock(self):
3810                 if self._lock_obj is None:
3811                         return
3812                 portage.locks.unlockfile(self._lock_obj)
3813                 self._lock_obj = None
3814                 self.locked = False
3815
3816 class BinpkgVerifier(AsynchronousTask):
3817         __slots__ = ("logfile", "pkg",)
3818
3819         def _start(self):
3820                 """
3821                 Note: Unlike a normal AsynchronousTask.start() method,
3822                 this one does all work is synchronously. The returncode
3823                 attribute will be set before it returns.
3824                 """
3825
3826                 pkg = self.pkg
3827                 root_config = pkg.root_config
3828                 bintree = root_config.trees["bintree"]
3829                 rval = os.EX_OK
3830                 stdout_orig = sys.stdout
3831                 stderr_orig = sys.stderr
3832                 log_file = None
3833                 if self.background and self.logfile is not None:
3834                         log_file = open(self.logfile, 'a')
3835                 try:
3836                         if log_file is not None:
3837                                 sys.stdout = log_file
3838                                 sys.stderr = log_file
3839                         try:
3840                                 bintree.digestCheck(pkg)
3841                         except portage.exception.FileNotFound:
3842                                 writemsg("!!! Fetching Binary failed " + \
3843                                         "for '%s'\n" % pkg.cpv, noiselevel=-1)
3844                                 rval = 1
3845                         except portage.exception.DigestException, e:
3846                                 writemsg("\n!!! Digest verification failed:\n",
3847                                         noiselevel=-1)
3848                                 writemsg("!!! %s\n" % e.value[0],
3849                                         noiselevel=-1)
3850                                 writemsg("!!! Reason: %s\n" % e.value[1],
3851                                         noiselevel=-1)
3852                                 writemsg("!!! Got: %s\n" % e.value[2],
3853                                         noiselevel=-1)
3854                                 writemsg("!!! Expected: %s\n" % e.value[3],
3855                                         noiselevel=-1)
3856                                 rval = 1
3857                         if rval != os.EX_OK:
3858                                 pkg_path = bintree.getname(pkg.cpv)
3859                                 head, tail = os.path.split(pkg_path)
3860                                 temp_filename = portage._checksum_failure_temp_file(head, tail)
3861                                 writemsg("File renamed to '%s'\n" % (temp_filename,),
3862                                         noiselevel=-1)
3863                 finally:
3864                         sys.stdout = stdout_orig
3865                         sys.stderr = stderr_orig
3866                         if log_file is not None:
3867                                 log_file.close()
3868
3869                 self.returncode = rval
3870                 self.wait()
3871
3872 class BinpkgPrefetcher(CompositeTask):
3873
3874         __slots__ = ("pkg",) + \
3875                 ("pkg_path", "_bintree",)
3876
3877         def _start(self):
3878                 self._bintree = self.pkg.root_config.trees["bintree"]
3879                 fetcher = BinpkgFetcher(background=self.background,
3880                         logfile=self.scheduler.fetch.log_file, pkg=self.pkg,
3881                         scheduler=self.scheduler)
3882                 self.pkg_path = fetcher.pkg_path
3883                 self._start_task(fetcher, self._fetcher_exit)
3884
3885         def _fetcher_exit(self, fetcher):
3886
3887                 if self._default_exit(fetcher) != os.EX_OK:
3888                         self.wait()
3889                         return
3890
3891                 verifier = BinpkgVerifier(background=self.background,
3892                         logfile=self.scheduler.fetch.log_file, pkg=self.pkg)
3893                 self._start_task(verifier, self._verifier_exit)
3894
3895         def _verifier_exit(self, verifier):
3896                 if self._default_exit(verifier) != os.EX_OK:
3897                         self.wait()
3898                         return
3899
3900                 self._bintree.inject(self.pkg.cpv, filename=self.pkg_path)
3901
3902                 self._current_task = None
3903                 self.returncode = os.EX_OK
3904                 self.wait()
3905
3906 class BinpkgExtractorAsync(SpawnProcess):
3907
3908         __slots__ = ("image_dir", "pkg", "pkg_path")
3909
3910         _shell_binary = portage.const.BASH_BINARY
3911
3912         def _start(self):
3913                 self.args = [self._shell_binary, "-c",
3914                         "bzip2 -dqc -- %s | tar -xp -C %s -f -" % \
3915                         (portage._shell_quote(self.pkg_path),
3916                         portage._shell_quote(self.image_dir))]
3917
3918                 self.env = self.pkg.root_config.settings.environ()
3919                 SpawnProcess._start(self)
3920
3921 class MergeListItem(CompositeTask):
3922
3923         """
3924         TODO: For parallel scheduling, everything here needs asynchronous
3925         execution support (start, poll, and wait methods).
3926         """
3927
3928         __slots__ = ("args_set",
3929                 "binpkg_opts", "build_opts", "config_pool", "emerge_opts",
3930                 "find_blockers", "logger", "mtimedb", "pkg",
3931                 "pkg_count", "pkg_to_replace", "prefetcher",
3932                 "settings", "statusMessage", "world_atom") + \
3933                 ("_install_task",)
3934
3935         def _start(self):
3936
3937                 pkg = self.pkg
3938                 build_opts = self.build_opts
3939
3940                 if pkg.installed:
3941                         # uninstall,  executed by self.merge()
3942                         self.returncode = os.EX_OK
3943                         self.wait()
3944                         return
3945
3946                 args_set = self.args_set
3947                 find_blockers = self.find_blockers
3948                 logger = self.logger
3949                 mtimedb = self.mtimedb
3950                 pkg_count = self.pkg_count
3951                 scheduler = self.scheduler
3952                 settings = self.settings
3953                 world_atom = self.world_atom
3954                 ldpath_mtimes = mtimedb["ldpath"]
3955
3956                 action_desc = "Emerging"
3957                 preposition = "for"
3958                 if pkg.type_name == "binary":
3959                         action_desc += " binary"
3960
3961                 if build_opts.fetchonly:
3962                         action_desc = "Fetching"
3963
3964                 msg = "%s (%s of %s) %s" % \
3965                         (action_desc,
3966                         colorize("MERGE_LIST_PROGRESS", str(pkg_count.curval)),
3967                         colorize("MERGE_LIST_PROGRESS", str(pkg_count.maxval)),
3968                         colorize("GOOD", pkg.cpv))
3969
3970                 portdb = pkg.root_config.trees["porttree"].dbapi
3971                 portdir_repo_name = portdb._repository_map.get(portdb.porttree_root)
3972                 if portdir_repo_name:
3973                         pkg_repo_name = pkg.metadata.get("repository")
3974                         if pkg_repo_name != portdir_repo_name:
3975                                 if not pkg_repo_name:
3976                                         pkg_repo_name = "unknown repo"
3977                                 msg += " from %s" % pkg_repo_name
3978
3979                 if pkg.root != "/":
3980                         msg += " %s %s" % (preposition, pkg.root)
3981
3982                 if not build_opts.pretend:
3983                         self.statusMessage(msg)
3984                         logger.log(" >>> emerge (%s of %s) %s to %s" % \
3985                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
3986
3987                 if pkg.type_name == "ebuild":
3988
3989                         build = EbuildBuild(args_set=args_set,
3990                                 background=self.background,
3991                                 config_pool=self.config_pool,
3992                                 find_blockers=find_blockers,
3993                                 ldpath_mtimes=ldpath_mtimes, logger=logger,
3994                                 opts=build_opts, pkg=pkg, pkg_count=pkg_count,
3995                                 prefetcher=self.prefetcher, scheduler=scheduler,
3996                                 settings=settings, world_atom=world_atom)
3997
3998                         self._install_task = build
3999                         self._start_task(build, self._default_final_exit)
4000                         return
4001
4002                 elif pkg.type_name == "binary":
4003
4004                         binpkg = Binpkg(background=self.background,
4005                                 find_blockers=find_blockers,
4006                                 ldpath_mtimes=ldpath_mtimes, logger=logger,
4007                                 opts=self.binpkg_opts, pkg=pkg, pkg_count=pkg_count,
4008                                 prefetcher=self.prefetcher, settings=settings,
4009                                 scheduler=scheduler, world_atom=world_atom)
4010
4011                         self._install_task = binpkg
4012                         self._start_task(binpkg, self._default_final_exit)
4013                         return
4014
4015         def _poll(self):
4016                 self._install_task.poll()
4017                 return self.returncode
4018
4019         def _wait(self):
4020                 self._install_task.wait()
4021                 return self.returncode
4022
4023         def merge(self):
4024
4025                 pkg = self.pkg
4026                 build_opts = self.build_opts
4027                 find_blockers = self.find_blockers
4028                 logger = self.logger
4029                 mtimedb = self.mtimedb
4030                 pkg_count = self.pkg_count
4031                 prefetcher = self.prefetcher
4032                 scheduler = self.scheduler
4033                 settings = self.settings
4034                 world_atom = self.world_atom
4035                 ldpath_mtimes = mtimedb["ldpath"]
4036
4037                 if pkg.installed:
4038                         if not (build_opts.buildpkgonly or \
4039                                 build_opts.fetchonly or build_opts.pretend):
4040
4041                                 uninstall = PackageUninstall(background=self.background,
4042                                         ldpath_mtimes=ldpath_mtimes, opts=self.emerge_opts,
4043                                         pkg=pkg, scheduler=scheduler, settings=settings)
4044
4045                                 uninstall.start()
4046                                 retval = uninstall.wait()
4047                                 if retval != os.EX_OK:
4048                                         return retval
4049                         return os.EX_OK
4050
4051                 if build_opts.fetchonly or \
4052                         build_opts.buildpkgonly:
4053                         return self.returncode
4054
4055                 retval = self._install_task.install()
4056                 return retval
4057
4058 class PackageMerge(AsynchronousTask):
4059         """
4060         TODO: Implement asynchronous merge so that the scheduler can
4061         run while a merge is executing.
4062         """
4063
4064         __slots__ = ("merge",)
4065
4066         def _start(self):
4067
4068                 pkg = self.merge.pkg
4069                 pkg_count = self.merge.pkg_count
4070
4071                 if pkg.installed:
4072                         action_desc = "Uninstalling"
4073                         preposition = "from"
4074                         counter_str = ""
4075                 else:
4076                         action_desc = "Installing"
4077                         preposition = "to"
4078                         counter_str = "(%s of %s) " % \
4079                                 (colorize("MERGE_LIST_PROGRESS", str(pkg_count.curval)),
4080                                 colorize("MERGE_LIST_PROGRESS", str(pkg_count.maxval)))
4081
4082                 msg = "%s %s%s" % \
4083                         (action_desc,
4084                         counter_str,
4085                         colorize("GOOD", pkg.cpv))
4086
4087                 if pkg.root != "/":
4088                         msg += " %s %s" % (preposition, pkg.root)
4089
4090                 if not self.merge.build_opts.fetchonly and \
4091                         not self.merge.build_opts.pretend and \
4092                         not self.merge.build_opts.buildpkgonly:
4093                         self.merge.statusMessage(msg)
4094
4095                 self.returncode = self.merge.merge()
4096                 self.wait()
4097
4098 class DependencyArg(object):
4099         def __init__(self, arg=None, root_config=None):
4100                 self.arg = arg
4101                 self.root_config = root_config
4102
4103         def __str__(self):
4104                 return str(self.arg)
4105
4106 class AtomArg(DependencyArg):
4107         def __init__(self, atom=None, **kwargs):
4108                 DependencyArg.__init__(self, **kwargs)
4109                 self.atom = atom
4110                 if not isinstance(self.atom, portage.dep.Atom):
4111                         self.atom = portage.dep.Atom(self.atom)
4112                 self.set = (self.atom, )
4113
4114 class PackageArg(DependencyArg):
4115         def __init__(self, package=None, **kwargs):
4116                 DependencyArg.__init__(self, **kwargs)
4117                 self.package = package
4118                 self.atom = portage.dep.Atom("=" + package.cpv)
4119                 self.set = (self.atom, )
4120
4121 class SetArg(DependencyArg):
4122         def __init__(self, set=None, **kwargs):
4123                 DependencyArg.__init__(self, **kwargs)
4124                 self.set = set
4125                 self.name = self.arg[len(SETPREFIX):]
4126
4127 class Dependency(SlotObject):
4128         __slots__ = ("atom", "blocker", "depth",
4129                 "parent", "onlydeps", "priority", "root")
4130         def __init__(self, **kwargs):
4131                 SlotObject.__init__(self, **kwargs)
4132                 if self.priority is None:
4133                         self.priority = DepPriority()
4134                 if self.depth is None:
4135                         self.depth = 0
4136
4137 class BlockerCache(portage.cache.mappings.MutableMapping):
4138         """This caches blockers of installed packages so that dep_check does not
4139         have to be done for every single installed package on every invocation of
4140         emerge.  The cache is invalidated whenever it is detected that something
4141         has changed that might alter the results of dep_check() calls:
4142                 1) the set of installed packages (including COUNTER) has changed
4143                 2) the old-style virtuals have changed
4144         """
4145
4146         # Number of uncached packages to trigger cache update, since
4147         # it's wasteful to update it for every vdb change.
4148         _cache_threshold = 5
4149
4150         class BlockerData(object):
4151
4152                 __slots__ = ("__weakref__", "atoms", "counter")
4153
4154                 def __init__(self, counter, atoms):
4155                         self.counter = counter
4156                         self.atoms = atoms
4157
4158         def __init__(self, myroot, vardb):
4159                 self._vardb = vardb
4160                 self._virtuals = vardb.settings.getvirtuals()
4161                 self._cache_filename = os.path.join(myroot,
4162                         portage.CACHE_PATH.lstrip(os.path.sep), "vdb_blockers.pickle")
4163                 self._cache_version = "1"
4164                 self._cache_data = None
4165                 self._modified = set()
4166                 self._load()
4167
4168         def _load(self):
4169                 try:
4170                         f = open(self._cache_filename, mode='rb')
4171                         mypickle = pickle.Unpickler(f)
4172                         try:
4173                                 mypickle.find_global = None
4174                         except AttributeError:
4175                                 # TODO: If py3k, override Unpickler.find_class().
4176                                 pass
4177                         self._cache_data = mypickle.load()
4178                         f.close()
4179                         del f
4180                 except (IOError, OSError, EOFError, ValueError, pickle.UnpicklingError), e:
4181                         if isinstance(e, pickle.UnpicklingError):
4182                                 writemsg("!!! Error loading '%s': %s\n" % \
4183                                         (self._cache_filename, str(e)), noiselevel=-1)
4184                         del e
4185
4186                 cache_valid = self._cache_data and \
4187                         isinstance(self._cache_data, dict) and \
4188                         self._cache_data.get("version") == self._cache_version and \
4189                         isinstance(self._cache_data.get("blockers"), dict)
4190                 if cache_valid:
4191                         # Validate all the atoms and counters so that
4192                         # corruption is detected as soon as possible.
4193                         invalid_items = set()
4194                         for k, v in self._cache_data["blockers"].iteritems():
4195                                 if not isinstance(k, basestring):
4196                                         invalid_items.add(k)
4197                                         continue
4198                                 try:
4199                                         if portage.catpkgsplit(k) is None:
4200                                                 invalid_items.add(k)
4201                                                 continue
4202                                 except portage.exception.InvalidData:
4203                                         invalid_items.add(k)
4204                                         continue
4205                                 if not isinstance(v, tuple) or \
4206                                         len(v) != 2:
4207                                         invalid_items.add(k)
4208                                         continue
4209                                 counter, atoms = v
4210                                 if not isinstance(counter, (int, long)):
4211                                         invalid_items.add(k)
4212                                         continue
4213                                 if not isinstance(atoms, (list, tuple)):
4214                                         invalid_items.add(k)
4215                                         continue
4216                                 invalid_atom = False
4217                                 for atom in atoms:
4218                                         if not isinstance(atom, basestring):
4219                                                 invalid_atom = True
4220                                                 break
4221                                         if atom[:1] != "!" or \
4222                                                 not portage.isvalidatom(
4223                                                 atom, allow_blockers=True):
4224                                                 invalid_atom = True
4225                                                 break
4226                                 if invalid_atom:
4227                                         invalid_items.add(k)
4228                                         continue
4229
4230                         for k in invalid_items:
4231                                 del self._cache_data["blockers"][k]
4232                         if not self._cache_data["blockers"]:
4233                                 cache_valid = False
4234
4235                 if not cache_valid:
4236                         self._cache_data = {"version":self._cache_version}
4237                         self._cache_data["blockers"] = {}
4238                         self._cache_data["virtuals"] = self._virtuals
4239                 self._modified.clear()
4240
4241         def flush(self):
4242                 """If the current user has permission and the internal blocker cache
4243                 been updated, save it to disk and mark it unmodified.  This is called
4244                 by emerge after it has proccessed blockers for all installed packages.
4245                 Currently, the cache is only written if the user has superuser
4246                 privileges (since that's required to obtain a lock), but all users
4247                 have read access and benefit from faster blocker lookups (as long as
4248                 the entire cache is still valid).  The cache is stored as a pickled
4249                 dict object with the following format:
4250
4251                 {
4252                         version : "1",
4253                         "blockers" : {cpv1:(counter,(atom1, atom2...)), cpv2...},
4254                         "virtuals" : vardb.settings.getvirtuals()
4255                 }
4256                 """
4257                 if len(self._modified) >= self._cache_threshold and \
4258                         secpass >= 2:
4259                         try:
4260                                 f = portage.util.atomic_ofstream(self._cache_filename, mode='wb')
4261                                 pickle.dump(self._cache_data, f, protocol=2)
4262                                 f.close()
4263                                 portage.util.apply_secpass_permissions(
4264                                         self._cache_filename, gid=portage.portage_gid, mode=0644)
4265                         except (IOError, OSError), e:
4266                                 pass
4267                         self._modified.clear()
4268
4269         def __setitem__(self, cpv, blocker_data):
4270                 """
4271                 Update the cache and mark it as modified for a future call to
4272                 self.flush().
4273
4274                 @param cpv: Package for which to cache blockers.
4275                 @type cpv: String
4276                 @param blocker_data: An object with counter and atoms attributes.
4277                 @type blocker_data: BlockerData
4278                 """
4279                 self._cache_data["blockers"][cpv] = \
4280                         (blocker_data.counter, tuple(str(x) for x in blocker_data.atoms))
4281                 self._modified.add(cpv)
4282
4283         def __iter__(self):
4284                 if self._cache_data is None:
4285                         # triggered by python-trace
4286                         return iter([])
4287                 return iter(self._cache_data["blockers"])
4288
4289         def __delitem__(self, cpv):
4290                 del self._cache_data["blockers"][cpv]
4291
4292         def __getitem__(self, cpv):
4293                 """
4294                 @rtype: BlockerData
4295                 @returns: An object with counter and atoms attributes.
4296                 """
4297                 return self.BlockerData(*self._cache_data["blockers"][cpv])
4298
4299 class BlockerDB(object):
4300
4301         def __init__(self, root_config):
4302                 self._root_config = root_config
4303                 self._vartree = root_config.trees["vartree"]
4304                 self._portdb = root_config.trees["porttree"].dbapi
4305
4306                 self._dep_check_trees = None
4307                 self._fake_vartree = None
4308
4309         def _get_fake_vartree(self, acquire_lock=0):
4310                 fake_vartree = self._fake_vartree
4311                 if fake_vartree is None:
4312                         fake_vartree = FakeVartree(self._root_config,
4313                                 acquire_lock=acquire_lock)
4314                         self._fake_vartree = fake_vartree
4315                         self._dep_check_trees = { self._vartree.root : {
4316                                 "porttree"    :  fake_vartree,
4317                                 "vartree"     :  fake_vartree,
4318                         }}
4319                 else:
4320                         fake_vartree.sync(acquire_lock=acquire_lock)
4321                 return fake_vartree
4322
4323         def findInstalledBlockers(self, new_pkg, acquire_lock=0):
4324                 blocker_cache = BlockerCache(self._vartree.root, self._vartree.dbapi)
4325                 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
4326                 settings = self._vartree.settings
4327                 stale_cache = set(blocker_cache)
4328                 fake_vartree = self._get_fake_vartree(acquire_lock=acquire_lock)
4329                 dep_check_trees = self._dep_check_trees
4330                 vardb = fake_vartree.dbapi
4331                 installed_pkgs = list(vardb)
4332
4333                 for inst_pkg in installed_pkgs:
4334                         stale_cache.discard(inst_pkg.cpv)
4335                         cached_blockers = blocker_cache.get(inst_pkg.cpv)
4336                         if cached_blockers is not None and \
4337                                 cached_blockers.counter != long(inst_pkg.metadata["COUNTER"]):
4338                                 cached_blockers = None
4339                         if cached_blockers is not None:
4340                                 blocker_atoms = cached_blockers.atoms
4341                         else:
4342                                 # Use aux_get() to trigger FakeVartree global
4343                                 # updates on *DEPEND when appropriate.
4344                                 depstr = " ".join(vardb.aux_get(inst_pkg.cpv, dep_keys))
4345                                 try:
4346                                         portage.dep._dep_check_strict = False
4347                                         success, atoms = portage.dep_check(depstr,
4348                                                 vardb, settings, myuse=inst_pkg.use.enabled,
4349                                                 trees=dep_check_trees, myroot=inst_pkg.root)
4350                                 finally:
4351                                         portage.dep._dep_check_strict = True
4352                                 if not success:
4353                                         pkg_location = os.path.join(inst_pkg.root,
4354                                                 portage.VDB_PATH, inst_pkg.category, inst_pkg.pf)
4355                                         portage.writemsg("!!! %s/*DEPEND: %s\n" % \
4356                                                 (pkg_location, atoms), noiselevel=-1)
4357                                         continue
4358
4359                                 blocker_atoms = [atom for atom in atoms \
4360                                         if atom.startswith("!")]
4361                                 blocker_atoms.sort()
4362                                 counter = long(inst_pkg.metadata["COUNTER"])
4363                                 blocker_cache[inst_pkg.cpv] = \
4364                                         blocker_cache.BlockerData(counter, blocker_atoms)
4365                 for cpv in stale_cache:
4366                         del blocker_cache[cpv]
4367                 blocker_cache.flush()
4368
4369                 blocker_parents = digraph()
4370                 blocker_atoms = []
4371                 for pkg in installed_pkgs:
4372                         for blocker_atom in blocker_cache[pkg.cpv].atoms:
4373                                 blocker_atom = blocker_atom.lstrip("!")
4374                                 blocker_atoms.append(blocker_atom)
4375                                 blocker_parents.add(blocker_atom, pkg)
4376
4377                 blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
4378                 blocking_pkgs = set()
4379                 for atom in blocker_atoms.iterAtomsForPackage(new_pkg):
4380                         blocking_pkgs.update(blocker_parents.parent_nodes(atom))
4381
4382                 # Check for blockers in the other direction.
4383                 depstr = " ".join(new_pkg.metadata[k] for k in dep_keys)
4384                 try:
4385                         portage.dep._dep_check_strict = False
4386                         success, atoms = portage.dep_check(depstr,
4387                                 vardb, settings, myuse=new_pkg.use.enabled,
4388                                 trees=dep_check_trees, myroot=new_pkg.root)
4389                 finally:
4390                         portage.dep._dep_check_strict = True
4391                 if not success:
4392                         # We should never get this far with invalid deps.
4393                         show_invalid_depstring_notice(new_pkg, depstr, atoms)
4394                         assert False
4395
4396                 blocker_atoms = [atom.lstrip("!") for atom in atoms \
4397                         if atom[:1] == "!"]
4398                 if blocker_atoms:
4399                         blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
4400                         for inst_pkg in installed_pkgs:
4401                                 try:
4402                                         blocker_atoms.iterAtomsForPackage(inst_pkg).next()
4403                                 except (portage.exception.InvalidDependString, StopIteration):
4404                                         continue
4405                                 blocking_pkgs.add(inst_pkg)
4406
4407                 return blocking_pkgs
4408
4409 def show_invalid_depstring_notice(parent_node, depstring, error_msg):
4410
4411         msg1 = "\n\n!!! Invalid or corrupt dependency specification: " + \
4412                 "\n\n%s\n\n%s\n\n%s\n\n" % (error_msg, parent_node, depstring)
4413         p_type, p_root, p_key, p_status = parent_node
4414         msg = []
4415         if p_status == "nomerge":
4416                 category, pf = portage.catsplit(p_key)
4417                 pkg_location = os.path.join(p_root, portage.VDB_PATH, category, pf)
4418                 msg.append("Portage is unable to process the dependencies of the ")
4419                 msg.append("'%s' package. " % p_key)
4420                 msg.append("In order to correct this problem, the package ")
4421                 msg.append("should be uninstalled, reinstalled, or upgraded. ")
4422                 msg.append("As a temporary workaround, the --nodeps option can ")
4423                 msg.append("be used to ignore all dependencies.  For reference, ")
4424                 msg.append("the problematic dependencies can be found in the ")
4425                 msg.append("*DEPEND files located in '%s/'." % pkg_location)
4426         else:
4427                 msg.append("This package can not be installed. ")
4428                 msg.append("Please notify the '%s' package maintainer " % p_key)
4429                 msg.append("about this problem.")
4430
4431         msg2 = "".join("%s\n" % line for line in textwrap.wrap("".join(msg), 72))
4432         writemsg_level(msg1 + msg2, level=logging.ERROR, noiselevel=-1)
4433
4434 class PackageVirtualDbapi(portage.dbapi):
4435         """
4436         A dbapi-like interface class that represents the state of the installed
4437         package database as new packages are installed, replacing any packages
4438         that previously existed in the same slot. The main difference between
4439         this class and fakedbapi is that this one uses Package instances
4440         internally (passed in via cpv_inject() and cpv_remove() calls).
4441         """
4442         def __init__(self, settings):
4443                 portage.dbapi.__init__(self)
4444                 self.settings = settings
4445                 self._match_cache = {}
4446                 self._cp_map = {}
4447                 self._cpv_map = {}
4448
4449         def clear(self):
4450                 """
4451                 Remove all packages.
4452                 """
4453                 if self._cpv_map:
4454                         self._clear_cache()
4455                         self._cp_map.clear()
4456                         self._cpv_map.clear()
4457
4458         def copy(self):
4459                 obj = PackageVirtualDbapi(self.settings)
4460                 obj._match_cache = self._match_cache.copy()
4461                 obj._cp_map = self._cp_map.copy()
4462                 for k, v in obj._cp_map.iteritems():
4463                         obj._cp_map[k] = v[:]
4464                 obj._cpv_map = self._cpv_map.copy()
4465                 return obj
4466
4467         def __iter__(self):
4468                 return self._cpv_map.itervalues()
4469
4470         def __contains__(self, item):
4471                 existing = self._cpv_map.get(item.cpv)
4472                 if existing is not None and \
4473                         existing == item:
4474                         return True
4475                 return False
4476
4477         def get(self, item, default=None):
4478                 cpv = getattr(item, "cpv", None)
4479                 if cpv is None:
4480                         if len(item) != 4:
4481                                 return default
4482                         type_name, root, cpv, operation = item
4483
4484                 existing = self._cpv_map.get(cpv)
4485                 if existing is not None and \
4486                         existing == item:
4487                         return existing
4488                 return default
4489
4490         def match_pkgs(self, atom):
4491                 return [self._cpv_map[cpv] for cpv in self.match(atom)]
4492
4493         def _clear_cache(self):
4494                 if self._categories is not None:
4495                         self._categories = None
4496                 if self._match_cache:
4497                         self._match_cache = {}
4498
4499         def match(self, origdep, use_cache=1):
4500                 result = self._match_cache.get(origdep)
4501                 if result is not None:
4502                         return result[:]
4503                 result = portage.dbapi.match(self, origdep, use_cache=use_cache)
4504                 self._match_cache[origdep] = result
4505                 return result[:]
4506
4507         def cpv_exists(self, cpv):
4508                 return cpv in self._cpv_map
4509
4510         def cp_list(self, mycp, use_cache=1):
4511                 cachelist = self._match_cache.get(mycp)
4512                 # cp_list() doesn't expand old-style virtuals
4513                 if cachelist and cachelist[0].startswith(mycp):
4514                         return cachelist[:]
4515                 cpv_list = self._cp_map.get(mycp)
4516                 if cpv_list is None:
4517                         cpv_list = []
4518                 else:
4519                         cpv_list = [pkg.cpv for pkg in cpv_list]
4520                 self._cpv_sort_ascending(cpv_list)
4521                 if not (not cpv_list and mycp.startswith("virtual/")):
4522                         self._match_cache[mycp] = cpv_list
4523                 return cpv_list[:]
4524
4525         def cp_all(self):
4526                 return list(self._cp_map)
4527
4528         def cpv_all(self):
4529                 return list(self._cpv_map)
4530
4531         def cpv_inject(self, pkg):
4532                 cp_list = self._cp_map.get(pkg.cp)
4533                 if cp_list is None:
4534                         cp_list = []
4535                         self._cp_map[pkg.cp] = cp_list
4536                 e_pkg = self._cpv_map.get(pkg.cpv)
4537                 if e_pkg is not None:
4538                         if e_pkg == pkg:
4539                                 return
4540                         self.cpv_remove(e_pkg)
4541                 for e_pkg in cp_list:
4542                         if e_pkg.slot_atom == pkg.slot_atom:
4543                                 if e_pkg == pkg:
4544                                         return
4545                                 self.cpv_remove(e_pkg)
4546                                 break
4547                 cp_list.append(pkg)
4548                 self._cpv_map[pkg.cpv] = pkg
4549                 self._clear_cache()
4550
4551         def cpv_remove(self, pkg):
4552                 old_pkg = self._cpv_map.get(pkg.cpv)
4553                 if old_pkg != pkg:
4554                         raise KeyError(pkg)
4555                 self._cp_map[pkg.cp].remove(pkg)
4556                 del self._cpv_map[pkg.cpv]
4557                 self._clear_cache()
4558
4559         def aux_get(self, cpv, wants):
4560                 metadata = self._cpv_map[cpv].metadata
4561                 return [metadata.get(x, "") for x in wants]
4562
4563         def aux_update(self, cpv, values):
4564                 self._cpv_map[cpv].metadata.update(values)
4565                 self._clear_cache()
4566
4567 class depgraph(object):
4568
4569         pkg_tree_map = RootConfig.pkg_tree_map
4570
4571         _dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
4572
4573         def __init__(self, settings, trees, myopts, myparams, spinner):
4574                 self.settings = settings
4575                 self.target_root = settings["ROOT"]
4576                 self.myopts = myopts
4577                 self.myparams = myparams
4578                 self.edebug = 0
4579                 if settings.get("PORTAGE_DEBUG", "") == "1":
4580                         self.edebug = 1
4581                 self.spinner = spinner
4582                 self._running_root = trees["/"]["root_config"]
4583                 self._opts_no_restart = Scheduler._opts_no_restart
4584                 self.pkgsettings = {}
4585                 # Maps slot atom to package for each Package added to the graph.
4586                 self._slot_pkg_map = {}
4587                 # Maps nodes to the reasons they were selected for reinstallation.
4588                 self._reinstall_nodes = {}
4589                 self.mydbapi = {}
4590                 self.trees = {}
4591                 self._trees_orig = trees
4592                 self.roots = {}
4593                 # Contains a filtered view of preferred packages that are selected
4594                 # from available repositories.
4595                 self._filtered_trees = {}
4596                 # Contains installed packages and new packages that have been added
4597                 # to the graph.
4598                 self._graph_trees = {}
4599                 # All Package instances
4600                 self._pkg_cache = {}
4601                 for myroot in trees:
4602                         self.trees[myroot] = {}
4603                         # Create a RootConfig instance that references
4604                         # the FakeVartree instead of the real one.
4605                         self.roots[myroot] = RootConfig(
4606                                 trees[myroot]["vartree"].settings,
4607                                 self.trees[myroot],
4608                                 trees[myroot]["root_config"].setconfig)
4609                         for tree in ("porttree", "bintree"):
4610                                 self.trees[myroot][tree] = trees[myroot][tree]
4611                         self.trees[myroot]["vartree"] = \
4612                                 FakeVartree(trees[myroot]["root_config"],
4613                                         pkg_cache=self._pkg_cache)
4614                         self.pkgsettings[myroot] = portage.config(
4615                                 clone=self.trees[myroot]["vartree"].settings)
4616                         self._slot_pkg_map[myroot] = {}
4617                         vardb = self.trees[myroot]["vartree"].dbapi
4618                         preload_installed_pkgs = "--nodeps" not in self.myopts and \
4619                                 "--buildpkgonly" not in self.myopts
4620                         # This fakedbapi instance will model the state that the vdb will
4621                         # have after new packages have been installed.
4622                         fakedb = PackageVirtualDbapi(vardb.settings)
4623                         if preload_installed_pkgs:
4624                                 for pkg in vardb:
4625                                         self.spinner.update()
4626                                         # This triggers metadata updates via FakeVartree.
4627                                         vardb.aux_get(pkg.cpv, [])
4628                                         fakedb.cpv_inject(pkg)
4629
4630                         # Now that the vardb state is cached in our FakeVartree,
4631                         # we won't be needing the real vartree cache for awhile.
4632                         # To make some room on the heap, clear the vardbapi
4633                         # caches.
4634                         trees[myroot]["vartree"].dbapi._clear_cache()
4635                         gc.collect()
4636
4637                         self.mydbapi[myroot] = fakedb
4638                         def graph_tree():
4639                                 pass
4640                         graph_tree.dbapi = fakedb
4641                         self._graph_trees[myroot] = {}
4642                         self._filtered_trees[myroot] = {}
4643                         # Substitute the graph tree for the vartree in dep_check() since we
4644                         # want atom selections to be consistent with package selections
4645                         # have already been made.
4646                         self._graph_trees[myroot]["porttree"]   = graph_tree
4647                         self._graph_trees[myroot]["vartree"]    = graph_tree
4648                         def filtered_tree():
4649                                 pass
4650                         filtered_tree.dbapi = self._dep_check_composite_db(self, myroot)
4651                         self._filtered_trees[myroot]["porttree"] = filtered_tree
4652
4653                         # Passing in graph_tree as the vartree here could lead to better
4654                         # atom selections in some cases by causing atoms for packages that
4655                         # have been added to the graph to be preferred over other choices.
4656                         # However, it can trigger atom selections that result in
4657                         # unresolvable direct circular dependencies. For example, this
4658                         # happens with gwydion-dylan which depends on either itself or
4659                         # gwydion-dylan-bin. In case gwydion-dylan is not yet installed,
4660                         # gwydion-dylan-bin needs to be selected in order to avoid a
4661                         # an unresolvable direct circular dependency.
4662                         #
4663                         # To solve the problem described above, pass in "graph_db" so that
4664                         # packages that have been added to the graph are distinguishable
4665                         # from other available packages and installed packages. Also, pass
4666                         # the parent package into self._select_atoms() calls so that
4667                         # unresolvable direct circular dependencies can be detected and
4668                         # avoided when possible.
4669                         self._filtered_trees[myroot]["graph_db"] = graph_tree.dbapi
4670                         self._filtered_trees[myroot]["vartree"] = self.trees[myroot]["vartree"]
4671
4672                         dbs = []
4673                         portdb = self.trees[myroot]["porttree"].dbapi
4674                         bindb  = self.trees[myroot]["bintree"].dbapi
4675                         vardb  = self.trees[myroot]["vartree"].dbapi
4676                         #               (db, pkg_type, built, installed, db_keys)
4677                         if "--usepkgonly" not in self.myopts:
4678                                 db_keys = list(portdb._aux_cache_keys)
4679                                 dbs.append((portdb, "ebuild", False, False, db_keys))
4680                         if "--usepkg" in self.myopts:
4681                                 db_keys = list(bindb._aux_cache_keys)
4682                                 dbs.append((bindb,  "binary", True, False, db_keys))
4683                         db_keys = list(trees[myroot]["vartree"].dbapi._aux_cache_keys)
4684                         dbs.append((vardb, "installed", True, True, db_keys))
4685                         self._filtered_trees[myroot]["dbs"] = dbs
4686                         if "--usepkg" in self.myopts:
4687                                 self.trees[myroot]["bintree"].populate(
4688                                         "--getbinpkg" in self.myopts,
4689                                         "--getbinpkgonly" in self.myopts)
4690                 del trees
4691
4692                 self.digraph=portage.digraph()
4693                 # contains all sets added to the graph
4694                 self._sets = {}
4695                 # contains atoms given as arguments
4696                 self._sets["args"] = InternalPackageSet()
4697                 # contains all atoms from all sets added to the graph, including
4698                 # atoms given as arguments
4699                 self._set_atoms = InternalPackageSet()
4700                 self._atom_arg_map = {}
4701                 # contains all nodes pulled in by self._set_atoms
4702                 self._set_nodes = set()
4703                 # Contains only Blocker -> Uninstall edges
4704                 self._blocker_uninstalls = digraph()
4705                 # Contains only Package -> Blocker edges
4706                 self._blocker_parents = digraph()
4707                 # Contains only irrelevant Package -> Blocker edges
4708                 self._irrelevant_blockers = digraph()
4709                 # Contains only unsolvable Package -> Blocker edges
4710                 self._unsolvable_blockers = digraph()
4711                 # Contains all Blocker -> Blocked Package edges
4712                 self._blocked_pkgs = digraph()
4713                 # Contains world packages that have been protected from
4714                 # uninstallation but may not have been added to the graph
4715                 # if the graph is not complete yet.
4716                 self._blocked_world_pkgs = {}
4717                 self._slot_collision_info = {}
4718                 # Slot collision nodes are not allowed to block other packages since
4719                 # blocker validation is only able to account for one package per slot.
4720                 self._slot_collision_nodes = set()
4721                 self._parent_atoms = {}
4722                 self._slot_conflict_parent_atoms = set()
4723                 self._serialized_tasks_cache = None
4724                 self._scheduler_graph = None
4725                 self._displayed_list = None
4726                 self._pprovided_args = []
4727                 self._missing_args = []
4728                 self._masked_installed = set()
4729                 self._unsatisfied_deps_for_display = []
4730                 self._unsatisfied_blockers_for_display = None
4731                 self._circular_deps_for_display = None
4732                 self._dep_stack = []
4733                 self._unsatisfied_deps = []
4734                 self._initially_unsatisfied_deps = []
4735                 self._ignored_deps = []
4736                 self._required_set_names = set(["system", "world"])
4737                 self._select_atoms = self._select_atoms_highest_available
4738                 self._select_package = self._select_pkg_highest_available
4739                 self._highest_pkg_cache = {}
4740
4741         def _show_slot_collision_notice(self):
4742                 """Show an informational message advising the user to mask one of the
4743                 the packages. In some cases it may be possible to resolve this
4744                 automatically, but support for backtracking (removal nodes that have
4745                 already been selected) will be required in order to handle all possible
4746                 cases.
4747                 """
4748
4749                 if not self._slot_collision_info:
4750                         return
4751
4752                 self._show_merge_list()
4753
4754                 msg = []
4755                 msg.append("\n!!! Multiple package instances within a single " + \
4756                         "package slot have been pulled\n")
4757                 msg.append("!!! into the dependency graph, resulting" + \
4758                         " in a slot conflict:\n\n")
4759                 indent = "  "
4760                 # Max number of parents shown, to avoid flooding the display.
4761                 max_parents = 3
4762                 explanation_columns = 70
4763                 explanations = 0
4764                 for (slot_atom, root), slot_nodes \
4765                         in self._slot_collision_info.iteritems():
4766                         msg.append(str(slot_atom))
4767                         msg.append("\n\n")
4768
4769                         for node in slot_nodes:
4770                                 msg.append(indent)
4771                                 msg.append(str(node))
4772                                 parent_atoms = self._parent_atoms.get(node)
4773                                 if parent_atoms:
4774                                         pruned_list = set()
4775                                         # Prefer conflict atoms over others.
4776                                         for parent_atom in parent_atoms:
4777                                                 if len(pruned_list) >= max_parents:
4778                                                         break
4779                                                 if parent_atom in self._slot_conflict_parent_atoms:
4780                                                         pruned_list.add(parent_atom)
4781
4782                                         # If this package was pulled in by conflict atoms then
4783                                         # show those alone since those are the most interesting.
4784                                         if not pruned_list:
4785                                                 # When generating the pruned list, prefer instances
4786                                                 # of DependencyArg over instances of Package.
4787                                                 for parent_atom in parent_atoms:
4788                                                         if len(pruned_list) >= max_parents:
4789                                                                 break
4790                                                         parent, atom = parent_atom
4791                                                         if isinstance(parent, DependencyArg):
4792                                                                 pruned_list.add(parent_atom)
4793                                                 # Prefer Packages instances that themselves have been
4794                                                 # pulled into collision slots.
4795                                                 for parent_atom in parent_atoms:
4796                                                         if len(pruned_list) >= max_parents:
4797                                                                 break
4798                                                         parent, atom = parent_atom
4799                                                         if isinstance(parent, Package) and \
4800                                                                 (parent.slot_atom, parent.root) \
4801                                                                 in self._slot_collision_info:
4802                                                                 pruned_list.add(parent_atom)
4803                                                 for parent_atom in parent_atoms:
4804                                                         if len(pruned_list) >= max_parents:
4805                                                                 break
4806                                                         pruned_list.add(parent_atom)
4807                                         omitted_parents = len(parent_atoms) - len(pruned_list)
4808                                         parent_atoms = pruned_list
4809                                         msg.append(" pulled in by\n")
4810                                         for parent_atom in parent_atoms:
4811                                                 parent, atom = parent_atom
4812                                                 msg.append(2*indent)
4813                                                 if isinstance(parent,
4814                                                         (PackageArg, AtomArg)):
4815                                                         # For PackageArg and AtomArg types, it's
4816                                                         # redundant to display the atom attribute.
4817                                                         msg.append(str(parent))
4818                                                 else:
4819                                                         # Display the specific atom from SetArg or
4820                                                         # Package types.
4821                                                         msg.append("%s required by %s" % (atom, parent))
4822                                                 msg.append("\n")
4823                                         if omitted_parents:
4824                                                 msg.append(2*indent)
4825                                                 msg.append("(and %d more)\n" % omitted_parents)
4826                                 else:
4827                                         msg.append(" (no parents)\n")
4828                                 msg.append("\n")
4829                         explanation = self._slot_conflict_explanation(slot_nodes)
4830                         if explanation:
4831                                 explanations += 1
4832                                 msg.append(indent + "Explanation:\n\n")
4833                                 for line in textwrap.wrap(explanation, explanation_columns):
4834                                         msg.append(2*indent + line + "\n")
4835                                 msg.append("\n")
4836                 msg.append("\n")
4837                 sys.stderr.write("".join(msg))
4838                 sys.stderr.flush()
4839
4840                 explanations_for_all = explanations == len(self._slot_collision_info)
4841
4842                 if explanations_for_all or "--quiet" in self.myopts:
4843                         return
4844
4845                 msg = []
4846                 msg.append("It may be possible to solve this problem ")
4847                 msg.append("by using package.mask to prevent one of ")
4848                 msg.append("those packages from being selected. ")
4849                 msg.append("However, it is also possible that conflicting ")
4850                 msg.append("dependencies exist such that they are impossible to ")
4851                 msg.append("satisfy simultaneously.  If such a conflict exists in ")
4852                 msg.append("the dependencies of two different packages, then those ")
4853                 msg.append("packages can not be installed simultaneously.")
4854
4855                 from formatter import AbstractFormatter, DumbWriter
4856                 f = AbstractFormatter(DumbWriter(sys.stderr, maxcol=72))
4857                 for x in msg:
4858                         f.add_flowing_data(x)
4859                 f.end_paragraph(1)
4860
4861                 msg = []
4862                 msg.append("For more information, see MASKED PACKAGES ")
4863                 msg.append("section in the emerge man page or refer ")
4864                 msg.append("to the Gentoo Handbook.")
4865                 for x in msg:
4866                         f.add_flowing_data(x)
4867                 f.end_paragraph(1)
4868                 f.writer.flush()
4869
4870         def _slot_conflict_explanation(self, slot_nodes):
4871                 """
4872                 When a slot conflict occurs due to USE deps, there are a few
4873                 different cases to consider:
4874
4875                 1) New USE are correctly set but --newuse wasn't requested so an
4876                    installed package with incorrect USE happened to get pulled
4877                    into graph before the new one.
4878
4879                 2) New USE are incorrectly set but an installed package has correct
4880                    USE so it got pulled into the graph, and a new instance also got
4881                    pulled in due to --newuse or an upgrade.
4882
4883                 3) Multiple USE deps exist that can't be satisfied simultaneously,
4884                    and multiple package instances got pulled into the same slot to
4885                    satisfy the conflicting deps.
4886
4887                 Currently, explanations and suggested courses of action are generated
4888                 for cases 1 and 2. Case 3 is too complex to give a useful suggestion.
4889                 """
4890
4891                 if len(slot_nodes) != 2:
4892                         # Suggestions are only implemented for
4893                         # conflicts between two packages.
4894                         return None
4895
4896                 all_conflict_atoms = self._slot_conflict_parent_atoms
4897                 matched_node = None
4898                 matched_atoms = None
4899                 unmatched_node = None
4900                 for node in slot_nodes:
4901                         parent_atoms = self._parent_atoms.get(node)
4902                         if not parent_atoms:
4903                                 # Normally, there are always parent atoms. If there are
4904                                 # none then something unexpected is happening and there's
4905                                 # currently no suggestion for this case.
4906                                 return None
4907                         conflict_atoms = all_conflict_atoms.intersection(parent_atoms)
4908                         for parent_atom in conflict_atoms:
4909                                 parent, atom = parent_atom
4910                                 if not atom.use:
4911                                         # Suggestions are currently only implemented for cases
4912                                         # in which all conflict atoms have USE deps.
4913                                         return None
4914                         if conflict_atoms:
4915                                 if matched_node is not None:
4916                                         # If conflict atoms match multiple nodes
4917                                         # then there's no suggestion.
4918                                         return None
4919                                 matched_node = node
4920                                 matched_atoms = conflict_atoms
4921                         else:
4922                                 if unmatched_node is not None:
4923                                         # Neither node is matched by conflict atoms, and
4924                                         # there is no suggestion for this case.
4925                                         return None
4926                                 unmatched_node = node
4927
4928                 if matched_node is None or unmatched_node is None:
4929                         # This shouldn't happen.
4930                         return None
4931
4932                 if unmatched_node.installed and not matched_node.installed and \
4933                         unmatched_node.cpv == matched_node.cpv:
4934                         # If the conflicting packages are the same version then
4935                         # --newuse should be all that's needed. If they are different
4936                         # versions then there's some other problem.
4937                         return "New USE are correctly set, but --newuse wasn't" + \
4938                                 " requested, so an installed package with incorrect USE " + \
4939                                 "happened to get pulled into the dependency graph. " + \
4940                                 "In order to solve " + \
4941                                 "this, either specify the --newuse option or explicitly " + \
4942                                 " reinstall '%s'." % matched_node.slot_atom
4943
4944                 if matched_node.installed and not unmatched_node.installed:
4945                         atoms = sorted(set(atom for parent, atom in matched_atoms))
4946                         explanation = ("New USE for '%s' are incorrectly set. " + \
4947                                 "In order to solve this, adjust USE to satisfy '%s'") % \
4948                                 (matched_node.slot_atom, atoms[0])
4949                         if len(atoms) > 1:
4950                                 for atom in atoms[1:-1]:
4951                                         explanation += ", '%s'" % (atom,)
4952                                 if len(atoms) > 2:
4953                                         explanation += ","
4954                                 explanation += " and '%s'" % (atoms[-1],)
4955                         explanation += "."
4956                         return explanation
4957
4958                 return None
4959
4960         def _process_slot_conflicts(self):
4961                 """
4962                 Process slot conflict data to identify specific atoms which
4963                 lead to conflict. These atoms only match a subset of the
4964                 packages that have been pulled into a given slot.
4965                 """
4966                 for (slot_atom, root), slot_nodes \
4967                         in self._slot_collision_info.iteritems():
4968
4969                         all_parent_atoms = set()
4970                         for pkg in slot_nodes:
4971                                 parent_atoms = self._parent_atoms.get(pkg)
4972                                 if not parent_atoms:
4973                                         continue
4974                                 all_parent_atoms.update(parent_atoms)
4975
4976                         for pkg in slot_nodes:
4977                                 parent_atoms = self._parent_atoms.get(pkg)
4978                                 if parent_atoms is None:
4979                                         parent_atoms = set()
4980                                         self._parent_atoms[pkg] = parent_atoms
4981                                 for parent_atom in all_parent_atoms:
4982                                         if parent_atom in parent_atoms:
4983                                                 continue
4984                                         # Use package set for matching since it will match via
4985                                         # PROVIDE when necessary, while match_from_list does not.
4986                                         parent, atom = parent_atom
4987                                         atom_set = InternalPackageSet(
4988                                                 initial_atoms=(atom,))
4989                                         if atom_set.findAtomForPackage(pkg):
4990                                                 parent_atoms.add(parent_atom)
4991                                         else:
4992                                                 self._slot_conflict_parent_atoms.add(parent_atom)
4993
4994         def _reinstall_for_flags(self, forced_flags,
4995                 orig_use, orig_iuse, cur_use, cur_iuse):
4996                 """Return a set of flags that trigger reinstallation, or None if there
4997                 are no such flags."""
4998                 if "--newuse" in self.myopts:
4999                         flags = set(orig_iuse.symmetric_difference(
5000                                 cur_iuse).difference(forced_flags))
5001                         flags.update(orig_iuse.intersection(orig_use).symmetric_difference(
5002                                 cur_iuse.intersection(cur_use)))
5003                         if flags:
5004                                 return flags
5005                 elif "changed-use" == self.myopts.get("--reinstall"):
5006                         flags = orig_iuse.intersection(orig_use).symmetric_difference(
5007                                 cur_iuse.intersection(cur_use))
5008                         if flags:
5009                                 return flags
5010                 return None
5011
5012         def _create_graph(self, allow_unsatisfied=False):
5013                 dep_stack = self._dep_stack
5014                 while dep_stack:
5015                         self.spinner.update()
5016                         dep = dep_stack.pop()
5017                         if isinstance(dep, Package):
5018                                 if not self._add_pkg_deps(dep,
5019                                         allow_unsatisfied=allow_unsatisfied):
5020                                         return 0
5021                                 continue
5022                         if not self._add_dep(dep, allow_unsatisfied=allow_unsatisfied):
5023                                 return 0
5024                 return 1
5025
5026         def _add_dep(self, dep, allow_unsatisfied=False):
5027                 debug = "--debug" in self.myopts
5028                 buildpkgonly = "--buildpkgonly" in self.myopts
5029                 nodeps = "--nodeps" in self.myopts
5030                 empty = "empty" in self.myparams
5031                 deep = "deep" in self.myparams
5032                 update = "--update" in self.myopts and dep.depth <= 1
5033                 if dep.blocker:
5034                         if not buildpkgonly and \
5035                                 not nodeps and \
5036                                 dep.parent not in self._slot_collision_nodes:
5037                                 if dep.parent.onlydeps:
5038                                         # It's safe to ignore blockers if the
5039                                         # parent is an --onlydeps node.
5040                                         return 1
5041                                 # The blocker applies to the root where
5042                                 # the parent is or will be installed.
5043                                 blocker = Blocker(atom=dep.atom,
5044                                         eapi=dep.parent.metadata["EAPI"],
5045                                         root=dep.parent.root)
5046                                 self._blocker_parents.add(blocker, dep.parent)
5047                         return 1
5048                 dep_pkg, existing_node = self._select_package(dep.root, dep.atom,
5049                         onlydeps=dep.onlydeps)
5050                 if not dep_pkg:
5051                         if dep.priority.optional:
5052                                 # This could be an unecessary build-time dep
5053                                 # pulled in by --with-bdeps=y.
5054                                 return 1
5055                         if allow_unsatisfied:
5056                                 self._unsatisfied_deps.append(dep)
5057                                 return 1
5058                         self._unsatisfied_deps_for_display.append(
5059                                 ((dep.root, dep.atom), {"myparent":dep.parent}))
5060                         return 0
5061                 # In some cases, dep_check will return deps that shouldn't
5062                 # be proccessed any further, so they are identified and
5063                 # discarded here. Try to discard as few as possible since
5064                 # discarded dependencies reduce the amount of information
5065                 # available for optimization of merge order.
5066                 if dep.priority.satisfied and \
5067                         not dep_pkg.installed and \
5068                         not (existing_node or empty or deep or update):
5069                         myarg = None
5070                         if dep.root == self.target_root:
5071                                 try:
5072                                         myarg = self._iter_atoms_for_pkg(dep_pkg).next()
5073                                 except StopIteration:
5074                                         pass
5075                                 except portage.exception.InvalidDependString:
5076                                         if not dep_pkg.installed:
5077                                                 # This shouldn't happen since the package
5078                                                 # should have been masked.
5079                                                 raise
5080                         if not myarg:
5081                                 self._ignored_deps.append(dep)
5082                                 return 1
5083
5084                 if not self._add_pkg(dep_pkg, dep):
5085                         return 0
5086                 return 1
5087
5088         def _add_pkg(self, pkg, dep):
5089                 myparent = None
5090                 priority = None
5091                 depth = 0
5092                 if dep is None:
5093                         dep = Dependency()
5094                 else:
5095                         myparent = dep.parent
5096                         priority = dep.priority
5097                         depth = dep.depth
5098                 if priority is None:
5099                         priority = DepPriority()
5100                 """
5101                 Fills the digraph with nodes comprised of packages to merge.
5102                 mybigkey is the package spec of the package to merge.
5103                 myparent is the package depending on mybigkey ( or None )
5104                 addme = Should we add this package to the digraph or are we just looking at it's deps?
5105                         Think --onlydeps, we need to ignore packages in that case.
5106                 #stuff to add:
5107                 #SLOT-aware emerge
5108                 #IUSE-aware emerge -> USE DEP aware depgraph
5109                 #"no downgrade" emerge
5110                 """
5111                 # Ensure that the dependencies of the same package
5112                 # are never processed more than once.
5113                 previously_added = pkg in self.digraph
5114
5115                 # select the correct /var database that we'll be checking against
5116                 vardbapi = self.trees[pkg.root]["vartree"].dbapi
5117                 pkgsettings = self.pkgsettings[pkg.root]
5118
5119                 arg_atoms = None
5120                 if True:
5121                         try:
5122                                 arg_atoms = list(self._iter_atoms_for_pkg(pkg))
5123                         except portage.exception.InvalidDependString, e:
5124                                 if not pkg.installed:
5125                                         show_invalid_depstring_notice(
5126                                                 pkg, pkg.metadata["PROVIDE"], str(e))
5127                                         return 0
5128                                 del e
5129
5130                 if not pkg.onlydeps:
5131                         if not pkg.installed and \
5132                                 "empty" not in self.myparams and \
5133                                 vardbapi.match(pkg.slot_atom):
5134                                 # Increase the priority of dependencies on packages that
5135                                 # are being rebuilt. This optimizes merge order so that
5136                                 # dependencies are rebuilt/updated as soon as possible,
5137                                 # which is needed especially when emerge is called by
5138                                 # revdep-rebuild since dependencies may be affected by ABI
5139                                 # breakage that has rendered them useless. Don't adjust
5140                                 # priority here when in "empty" mode since all packages
5141                                 # are being merged in that case.
5142                                 priority.rebuild = True
5143
5144                         existing_node = self._slot_pkg_map[pkg.root].get(pkg.slot_atom)
5145                         slot_collision = False
5146                         if existing_node:
5147                                 existing_node_matches = pkg.cpv == existing_node.cpv
5148                                 if existing_node_matches and \
5149                                         pkg != existing_node and \
5150                                         dep.atom is not None:
5151                                         # Use package set for matching since it will match via
5152                                         # PROVIDE when necessary, while match_from_list does not.
5153                                         atom_set = InternalPackageSet(initial_atoms=[dep.atom])
5154                                         if not atom_set.findAtomForPackage(existing_node):
5155                                                 existing_node_matches = False
5156                                 if existing_node_matches:
5157                                         # The existing node can be reused.
5158                                         if arg_atoms:
5159                                                 for parent_atom in arg_atoms:
5160                                                         parent, atom = parent_atom
5161                                                         self.digraph.add(existing_node, parent,
5162                                                                 priority=priority)
5163                                                         self._add_parent_atom(existing_node, parent_atom)
5164                                         # If a direct circular dependency is not an unsatisfied
5165                                         # buildtime dependency then drop it here since otherwise
5166                                         # it can skew the merge order calculation in an unwanted
5167                                         # way.
5168                                         if existing_node != myparent or \
5169                                                 (priority.buildtime and not priority.satisfied):
5170                                                 self.digraph.addnode(existing_node, myparent,
5171                                                         priority=priority)
5172                                                 if dep.atom is not None and dep.parent is not None:
5173                                                         self._add_parent_atom(existing_node,
5174                                                                 (dep.parent, dep.atom))
5175                                         return 1
5176                                 else:
5177
5178                                         # A slot collision has occurred.  Sometimes this coincides
5179                                         # with unresolvable blockers, so the slot collision will be
5180                                         # shown later if there are no unresolvable blockers.
5181                                         self._add_slot_conflict(pkg)
5182                                         slot_collision = True
5183
5184                         if slot_collision:
5185                                 # Now add this node to the graph so that self.display()
5186                                 # can show use flags and --tree portage.output.  This node is
5187                                 # only being partially added to the graph.  It must not be
5188                                 # allowed to interfere with the other nodes that have been
5189                                 # added.  Do not overwrite data for existing nodes in
5190                                 # self.mydbapi since that data will be used for blocker
5191                                 # validation.
5192                                 # Even though the graph is now invalid, continue to process
5193                                 # dependencies so that things like --fetchonly can still
5194                                 # function despite collisions.
5195                                 pass
5196                         elif not previously_added:
5197                                 self._slot_pkg_map[pkg.root][pkg.slot_atom] = pkg
5198                                 self.mydbapi[pkg.root].cpv_inject(pkg)
5199                                 self._filtered_trees[pkg.root]["porttree"].dbapi._clear_cache()
5200
5201                         if not pkg.installed:
5202                                 # Allow this package to satisfy old-style virtuals in case it
5203                                 # doesn't already. Any pre-existing providers will be preferred
5204                                 # over this one.
5205                                 try:
5206                                         pkgsettings.setinst(pkg.cpv, pkg.metadata)
5207                                         # For consistency, also update the global virtuals.
5208                                         settings = self.roots[pkg.root].settings
5209                                         settings.unlock()
5210                                         settings.setinst(pkg.cpv, pkg.metadata)
5211                                         settings.lock()
5212                                 except portage.exception.InvalidDependString, e:
5213                                         show_invalid_depstring_notice(
5214                                                 pkg, pkg.metadata["PROVIDE"], str(e))
5215                                         del e
5216                                         return 0
5217
5218                 if arg_atoms:
5219                         self._set_nodes.add(pkg)
5220
5221                 # Do this even when addme is False (--onlydeps) so that the
5222                 # parent/child relationship is always known in case
5223                 # self._show_slot_collision_notice() needs to be called later.
5224                 self.digraph.add(pkg, myparent, priority=priority)
5225                 if dep.atom is not None and dep.parent is not None:
5226                         self._add_parent_atom(pkg, (dep.parent, dep.atom))
5227
5228                 if arg_atoms:
5229                         for parent_atom in arg_atoms:
5230                                 parent, atom = parent_atom
5231                                 self.digraph.add(pkg, parent, priority=priority)
5232                                 self._add_parent_atom(pkg, parent_atom)
5233
5234                 """ This section determines whether we go deeper into dependencies or not.
5235                     We want to go deeper on a few occasions:
5236                     Installing package A, we need to make sure package A's deps are met.
5237                     emerge --deep <pkgspec>; we need to recursively check dependencies of pkgspec
5238                     If we are in --nodeps (no recursion) mode, we obviously only check 1 level of dependencies.
5239                 """
5240                 dep_stack = self._dep_stack
5241                 if "recurse" not in self.myparams:
5242                         return 1
5243                 elif pkg.installed and \
5244                         "deep" not in self.myparams:
5245                         dep_stack = self._ignored_deps
5246
5247                 self.spinner.update()
5248
5249                 if arg_atoms:
5250                         depth = 0
5251                 pkg.depth = depth
5252                 if not previously_added:
5253                         dep_stack.append(pkg)
5254                 return 1
5255
5256         def _add_parent_atom(self, pkg, parent_atom):
5257                 parent_atoms = self._parent_atoms.get(pkg)
5258                 if parent_atoms is None:
5259                         parent_atoms = set()
5260                         self._parent_atoms[pkg] = parent_atoms
5261                 parent_atoms.add(parent_atom)
5262
5263         def _add_slot_conflict(self, pkg):
5264                 self._slot_collision_nodes.add(pkg)
5265                 slot_key = (pkg.slot_atom, pkg.root)
5266                 slot_nodes = self._slot_collision_info.get(slot_key)
5267                 if slot_nodes is None:
5268                         slot_nodes = set()
5269                         slot_nodes.add(self._slot_pkg_map[pkg.root][pkg.slot_atom])
5270                         self._slot_collision_info[slot_key] = slot_nodes
5271                 slot_nodes.add(pkg)
5272
5273         def _add_pkg_deps(self, pkg, allow_unsatisfied=False):
5274
5275                 mytype = pkg.type_name
5276                 myroot = pkg.root
5277                 mykey = pkg.cpv
5278                 metadata = pkg.metadata
5279                 myuse = pkg.use.enabled
5280                 jbigkey = pkg
5281                 depth = pkg.depth + 1
5282                 removal_action = "remove" in self.myparams
5283
5284                 edepend={}
5285                 depkeys = ["DEPEND","RDEPEND","PDEPEND"]
5286                 for k in depkeys:
5287                         edepend[k] = metadata[k]
5288
5289                 if not pkg.built and \
5290                         "--buildpkgonly" in self.myopts and \
5291                         "deep" not in self.myparams and \
5292                         "empty" not in self.myparams:
5293                         edepend["RDEPEND"] = ""
5294                         edepend["PDEPEND"] = ""
5295                 bdeps_optional = False
5296
5297                 if pkg.built and not removal_action:
5298                         if self.myopts.get("--with-bdeps", "n") == "y":
5299                                 # Pull in build time deps as requested, but marked them as
5300                                 # "optional" since they are not strictly required. This allows
5301                                 # more freedom in the merge order calculation for solving
5302                                 # circular dependencies. Don't convert to PDEPEND since that
5303                                 # could make --with-bdeps=y less effective if it is used to
5304                                 # adjust merge order to prevent built_with_use() calls from
5305                                 # failing.
5306                                 bdeps_optional = True
5307                         else:
5308                                 # built packages do not have build time dependencies.
5309                                 edepend["DEPEND"] = ""
5310
5311                 if removal_action and self.myopts.get("--with-bdeps", "y") == "n":
5312                         edepend["DEPEND"] = ""
5313
5314                 bdeps_root = "/"
5315                 root_deps = self.myopts.get("--root-deps")
5316                 if root_deps is not None:
5317                         if root_deps is True:
5318                                 bdeps_root = myroot
5319                         elif root_deps == "rdeps":
5320                                 edepend["DEPEND"] = ""
5321
5322                 deps = (
5323                         (bdeps_root, edepend["DEPEND"],
5324                                 self._priority(buildtime=(not bdeps_optional),
5325                                 optional=bdeps_optional)),
5326                         (myroot, edepend["RDEPEND"], self._priority(runtime=True)),
5327                         (myroot, edepend["PDEPEND"], self._priority(runtime_post=True))
5328                 )
5329
5330                 debug = "--debug" in self.myopts
5331                 strict = mytype != "installed"
5332                 try:
5333                         for dep_root, dep_string, dep_priority in deps:
5334                                 if not dep_string:
5335                                         continue
5336                                 if debug:
5337                                         print
5338                                         print "Parent:   ", jbigkey
5339                                         print "Depstring:", dep_string
5340                                         print "Priority:", dep_priority
5341                                 vardb = self.roots[dep_root].trees["vartree"].dbapi
5342                                 try:
5343                                         selected_atoms = self._select_atoms(dep_root,
5344                                                 dep_string, myuse=myuse, parent=pkg, strict=strict,
5345                                                 priority=dep_priority)
5346                                 except portage.exception.InvalidDependString, e:
5347                                         show_invalid_depstring_notice(jbigkey, dep_string, str(e))
5348                                         return 0
5349                                 if debug:
5350                                         print "Candidates:", selected_atoms
5351
5352                                 for atom in selected_atoms:
5353                                         try:
5354
5355                                                 atom = portage.dep.Atom(atom)
5356
5357                                                 mypriority = dep_priority.copy()
5358                                                 if not atom.blocker and vardb.match(atom):
5359                                                         mypriority.satisfied = True
5360
5361                                                 if not self._add_dep(Dependency(atom=atom,
5362                                                         blocker=atom.blocker, depth=depth, parent=pkg,
5363                                                         priority=mypriority, root=dep_root),
5364                                                         allow_unsatisfied=allow_unsatisfied):
5365                                                         return 0
5366
5367                                         except portage.exception.InvalidAtom, e:
5368                                                 show_invalid_depstring_notice(
5369                                                         pkg, dep_string, str(e))
5370                                                 del e
5371                                                 if not pkg.installed:
5372                                                         return 0
5373
5374                                 if debug:
5375                                         print "Exiting...", jbigkey
5376                 except portage.exception.AmbiguousPackageName, e:
5377                         pkgs = e.args[0]
5378                         portage.writemsg("\n\n!!! An atom in the dependencies " + \
5379                                 "is not fully-qualified. Multiple matches:\n\n", noiselevel=-1)
5380                         for cpv in pkgs:
5381                                 portage.writemsg("    %s\n" % cpv, noiselevel=-1)
5382                         portage.writemsg("\n", noiselevel=-1)
5383                         if mytype == "binary":
5384                                 portage.writemsg(
5385                                         "!!! This binary package cannot be installed: '%s'\n" % \
5386                                         mykey, noiselevel=-1)
5387                         elif mytype == "ebuild":
5388                                 portdb = self.roots[myroot].trees["porttree"].dbapi
5389                                 myebuild, mylocation = portdb.findname2(mykey)
5390                                 portage.writemsg("!!! This ebuild cannot be installed: " + \
5391                                         "'%s'\n" % myebuild, noiselevel=-1)
5392                         portage.writemsg("!!! Please notify the package maintainer " + \
5393                                 "that atoms must be fully-qualified.\n", noiselevel=-1)
5394                         return 0
5395                 return 1
5396
5397         def _priority(self, **kwargs):
5398                 if "remove" in self.myparams:
5399                         priority_constructor = UnmergeDepPriority
5400                 else:
5401                         priority_constructor = DepPriority
5402                 return priority_constructor(**kwargs)
5403
5404         def _dep_expand(self, root_config, atom_without_category):
5405                 """
5406                 @param root_config: a root config instance
5407                 @type root_config: RootConfig
5408                 @param atom_without_category: an atom without a category component
5409                 @type atom_without_category: String
5410                 @rtype: list
5411                 @returns: a list of atoms containing categories (possibly empty)
5412                 """
5413                 null_cp = portage.dep_getkey(insert_category_into_atom(
5414                         atom_without_category, "null"))
5415                 cat, atom_pn = portage.catsplit(null_cp)
5416
5417                 dbs = self._filtered_trees[root_config.root]["dbs"]
5418                 categories = set()
5419                 for db, pkg_type, built, installed, db_keys in dbs:
5420                         for cat in db.categories:
5421                                 if db.cp_list("%s/%s" % (cat, atom_pn)):
5422                                         categories.add(cat)
5423
5424                 deps = []
5425                 for cat in categories:
5426                         deps.append(insert_category_into_atom(
5427                                 atom_without_category, cat))
5428                 return deps
5429
5430         def _have_new_virt(self, root, atom_cp):
5431                 ret = False
5432                 for db, pkg_type, built, installed, db_keys in \
5433                         self._filtered_trees[root]["dbs"]:
5434                         if db.cp_list(atom_cp):
5435                                 ret = True
5436                                 break
5437                 return ret
5438
5439         def _iter_atoms_for_pkg(self, pkg):
5440                 # TODO: add multiple $ROOT support
5441                 if pkg.root != self.target_root:
5442                         return
5443                 atom_arg_map = self._atom_arg_map
5444                 root_config = self.roots[pkg.root]
5445                 for atom in self._set_atoms.iterAtomsForPackage(pkg):
5446                         atom_cp = portage.dep_getkey(atom)
5447                         if atom_cp != pkg.cp and \
5448                                 self._have_new_virt(pkg.root, atom_cp):
5449                                 continue
5450                         visible_pkgs = root_config.visible_pkgs.match_pkgs(atom)
5451                         visible_pkgs.reverse() # descending order
5452                         higher_slot = None
5453                         for visible_pkg in visible_pkgs:
5454                                 if visible_pkg.cp != atom_cp:
5455                                         continue
5456                                 if pkg >= visible_pkg:
5457                                         # This is descending order, and we're not
5458                                         # interested in any versions <= pkg given.
5459                                         break
5460                                 if pkg.slot_atom != visible_pkg.slot_atom:
5461                                         higher_slot = visible_pkg
5462                                         break
5463                         if higher_slot is not None:
5464                                 continue
5465                         for arg in atom_arg_map[(atom, pkg.root)]:
5466                                 if isinstance(arg, PackageArg) and \
5467                                         arg.package != pkg:
5468                                         continue
5469                                 yield arg, atom
5470
5471         def select_files(self, myfiles):
5472                 """Given a list of .tbz2s, .ebuilds sets, and deps, create the
5473                 appropriate depgraph and return a favorite list."""
5474                 debug = "--debug" in self.myopts
5475                 root_config = self.roots[self.target_root]
5476                 sets = root_config.sets
5477                 getSetAtoms = root_config.setconfig.getSetAtoms
5478                 myfavorites=[]
5479                 myroot = self.target_root
5480                 dbs = self._filtered_trees[myroot]["dbs"]
5481                 vardb = self.trees[myroot]["vartree"].dbapi
5482                 real_vardb = self._trees_orig[myroot]["vartree"].dbapi
5483                 portdb = self.trees[myroot]["porttree"].dbapi
5484                 bindb = self.trees[myroot]["bintree"].dbapi
5485                 pkgsettings = self.pkgsettings[myroot]
5486                 args = []
5487                 onlydeps = "--onlydeps" in self.myopts
5488                 lookup_owners = []
5489                 for x in myfiles:
5490                         ext = os.path.splitext(x)[1]
5491                         if ext==".tbz2":
5492                                 if not os.path.exists(x):
5493                                         if os.path.exists(
5494                                                 os.path.join(pkgsettings["PKGDIR"], "All", x)):
5495                                                 x = os.path.join(pkgsettings["PKGDIR"], "All", x)
5496                                         elif os.path.exists(
5497                                                 os.path.join(pkgsettings["PKGDIR"], x)):
5498                                                 x = os.path.join(pkgsettings["PKGDIR"], x)
5499                                         else:
5500                                                 print "\n\n!!! Binary package '"+str(x)+"' does not exist."
5501                                                 print "!!! Please ensure the tbz2 exists as specified.\n"
5502                                                 return 0, myfavorites
5503                                 mytbz2=portage.xpak.tbz2(x)
5504                                 mykey=mytbz2.getelements("CATEGORY")[0]+"/"+os.path.splitext(os.path.basename(x))[0]
5505                                 if os.path.realpath(x) != \
5506                                         os.path.realpath(self.trees[myroot]["bintree"].getname(mykey)):
5507                                         print colorize("BAD", "\n*** You need to adjust PKGDIR to emerge this package.\n")
5508                                         return 0, myfavorites
5509                                 db_keys = list(bindb._aux_cache_keys)
5510                                 metadata = izip(db_keys, bindb.aux_get(mykey, db_keys))
5511                                 pkg = Package(type_name="binary", root_config=root_config,
5512                                         cpv=mykey, built=True, metadata=metadata,
5513                                         onlydeps=onlydeps)
5514                                 self._pkg_cache[pkg] = pkg
5515                                 args.append(PackageArg(arg=x, package=pkg,
5516                                         root_config=root_config))
5517                         elif ext==".ebuild":
5518                                 ebuild_path = portage.util.normalize_path(os.path.abspath(x))
5519                                 pkgdir = os.path.dirname(ebuild_path)
5520                                 tree_root = os.path.dirname(os.path.dirname(pkgdir))
5521                                 cp = pkgdir[len(tree_root)+1:]
5522                                 e = portage.exception.PackageNotFound(
5523                                         ("%s is not in a valid portage tree " + \
5524                                         "hierarchy or does not exist") % x)
5525                                 if not portage.isvalidatom(cp):
5526                                         raise e
5527                                 cat = portage.catsplit(cp)[0]
5528                                 mykey = cat + "/" + os.path.basename(ebuild_path[:-7])
5529                                 if not portage.isvalidatom("="+mykey):
5530                                         raise e
5531                                 ebuild_path = portdb.findname(mykey)
5532                                 if ebuild_path:
5533                                         if ebuild_path != os.path.join(os.path.realpath(tree_root),
5534                                                 cp, os.path.basename(ebuild_path)):
5535                                                 print colorize("BAD", "\n*** You need to adjust PORTDIR or PORTDIR_OVERLAY to emerge this package.\n")
5536                                                 return 0, myfavorites
5537                                         if mykey not in portdb.xmatch(
5538                                                 "match-visible", portage.dep_getkey(mykey)):
5539                                                 print colorize("BAD", "\n*** You are emerging a masked package. It is MUCH better to use")
5540                                                 print colorize("BAD", "*** /etc/portage/package.* to accomplish this. See portage(5) man")
5541                                                 print colorize("BAD", "*** page for details.")
5542                                                 countdown(int(self.settings["EMERGE_WARNING_DELAY"]),
5543                                                         "Continuing...")
5544                                 else:
5545                                         raise portage.exception.PackageNotFound(
5546                                                 "%s is not in a valid portage tree hierarchy or does not exist" % x)
5547                                 db_keys = list(portdb._aux_cache_keys)
5548                                 metadata = izip(db_keys, portdb.aux_get(mykey, db_keys))
5549                                 pkg = Package(type_name="ebuild", root_config=root_config,
5550                                         cpv=mykey, metadata=metadata, onlydeps=onlydeps)
5551                                 pkgsettings.setcpv(pkg)
5552                                 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
5553                                 pkg.metadata['CHOST'] = pkgsettings.get('CHOST', '')
5554                                 self._pkg_cache[pkg] = pkg
5555                                 args.append(PackageArg(arg=x, package=pkg,
5556                                         root_config=root_config))
5557                         elif x.startswith(os.path.sep):
5558                                 if not x.startswith(myroot):
5559                                         portage.writemsg(("\n\n!!! '%s' does not start with" + \
5560                                                 " $ROOT.\n") % x, noiselevel=-1)
5561                                         return 0, []
5562                                 # Queue these up since it's most efficient to handle
5563                                 # multiple files in a single iter_owners() call.
5564                                 lookup_owners.append(x)
5565                         else:
5566                                 if x in ("system", "world"):
5567                                         x = SETPREFIX + x
5568                                 if x.startswith(SETPREFIX):
5569                                         s = x[len(SETPREFIX):]
5570                                         if s not in sets:
5571                                                 raise portage.exception.PackageSetNotFound(s)
5572                                         if s in self._sets:
5573                                                 continue
5574                                         # Recursively expand sets so that containment tests in
5575                                         # self._get_parent_sets() properly match atoms in nested
5576                                         # sets (like if world contains system).
5577                                         expanded_set = InternalPackageSet(
5578                                                 initial_atoms=getSetAtoms(s))
5579                                         self._sets[s] = expanded_set
5580                                         args.append(SetArg(arg=x, set=expanded_set,
5581                                                 root_config=root_config))
5582                                         continue
5583                                 if not is_valid_package_atom(x):
5584                                         portage.writemsg("\n\n!!! '%s' is not a valid package atom.\n" % x,
5585                                                 noiselevel=-1)
5586                                         portage.writemsg("!!! Please check ebuild(5) for full details.\n")
5587                                         portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
5588                                         return (0,[])
5589                                 # Don't expand categories or old-style virtuals here unless
5590                                 # necessary. Expansion of old-style virtuals here causes at
5591                                 # least the following problems:
5592                                 #   1) It's more difficult to determine which set(s) an atom
5593                                 #      came from, if any.
5594                                 #   2) It takes away freedom from the resolver to choose other
5595                                 #      possible expansions when necessary.
5596                                 if "/" in x:
5597                                         args.append(AtomArg(arg=x, atom=x,
5598                                                 root_config=root_config))
5599                                         continue
5600                                 expanded_atoms = self._dep_expand(root_config, x)
5601                                 installed_cp_set = set()
5602                                 for atom in expanded_atoms:
5603                                         atom_cp = portage.dep_getkey(atom)
5604                                         if vardb.cp_list(atom_cp):
5605                                                 installed_cp_set.add(atom_cp)
5606
5607                                 if len(installed_cp_set) > 1:
5608                                         non_virtual_cps = set()
5609                                         for atom_cp in installed_cp_set:
5610                                                 if not atom_cp.startswith("virtual/"):
5611                                                         non_virtual_cps.add(atom_cp)
5612                                         if len(non_virtual_cps) == 1:
5613                                                 installed_cp_set = non_virtual_cps
5614
5615                                 if len(expanded_atoms) > 1 and len(installed_cp_set) == 1:
5616                                         installed_cp = iter(installed_cp_set).next()
5617                                         expanded_atoms = [atom for atom in expanded_atoms \
5618                                                 if portage.dep_getkey(atom) == installed_cp]
5619
5620                                 if len(expanded_atoms) > 1:
5621                                         print
5622                                         print
5623                                         ambiguous_package_name(x, expanded_atoms, root_config,
5624                                                 self.spinner, self.myopts)
5625                                         return False, myfavorites
5626                                 if expanded_atoms:
5627                                         atom = expanded_atoms[0]
5628                                 else:
5629                                         null_atom = insert_category_into_atom(x, "null")
5630                                         null_cp = portage.dep_getkey(null_atom)
5631                                         cat, atom_pn = portage.catsplit(null_cp)
5632                                         virts_p = root_config.settings.get_virts_p().get(atom_pn)
5633                                         if virts_p:
5634                                                 # Allow the depgraph to choose which virtual.
5635                                                 atom = insert_category_into_atom(x, "virtual")
5636                                         else:
5637                                                 atom = insert_category_into_atom(x, "null")
5638
5639                                 args.append(AtomArg(arg=x, atom=atom,
5640                                         root_config=root_config))
5641
5642                 if lookup_owners:
5643                         relative_paths = []
5644                         search_for_multiple = False
5645                         if len(lookup_owners) > 1:
5646                                 search_for_multiple = True
5647
5648                         for x in lookup_owners:
5649                                 if not search_for_multiple and os.path.isdir(x):
5650                                         search_for_multiple = True
5651                                 relative_paths.append(x[len(myroot):])
5652
5653                         owners = set()
5654                         for pkg, relative_path in \
5655                                 real_vardb._owners.iter_owners(relative_paths):
5656                                 owners.add(pkg.mycpv)
5657                                 if not search_for_multiple:
5658                                         break
5659
5660                         if not owners:
5661                                 portage.writemsg(("\n\n!!! '%s' is not claimed " + \
5662                                         "by any package.\n") % lookup_owners[0], noiselevel=-1)
5663                                 return 0, []
5664
5665                         for cpv in owners:
5666                                 slot = vardb.aux_get(cpv, ["SLOT"])[0]
5667                                 if not slot:
5668                                         # portage now masks packages with missing slot, but it's
5669                                         # possible that one was installed by an older version
5670                                         atom = portage.cpv_getkey(cpv)
5671                                 else:
5672                                         atom = "%s:%s" % (portage.cpv_getkey(cpv), slot)
5673                                 args.append(AtomArg(arg=atom, atom=atom,
5674                                         root_config=root_config))
5675
5676                 if "--update" in self.myopts:
5677                         # In some cases, the greedy slots behavior can pull in a slot that
5678                         # the user would want to uninstall due to it being blocked by a
5679                         # newer version in a different slot. Therefore, it's necessary to
5680                         # detect and discard any that should be uninstalled. Each time
5681                         # that arguments are updated, package selections are repeated in
5682                         # order to ensure consistency with the current arguments:
5683                         #
5684                         #  1) Initialize args
5685                         #  2) Select packages and generate initial greedy atoms
5686                         #  3) Update args with greedy atoms
5687                         #  4) Select packages and generate greedy atoms again, while
5688                         #     accounting for any blockers between selected packages
5689                         #  5) Update args with revised greedy atoms
5690
5691                         self._set_args(args)
5692                         greedy_args = []
5693                         for arg in args:
5694                                 greedy_args.append(arg)
5695                                 if not isinstance(arg, AtomArg):
5696                                         continue
5697                                 for atom in self._greedy_slots(arg.root_config, arg.atom):
5698                                         greedy_args.append(
5699                                                 AtomArg(arg=arg.arg, atom=atom,
5700                                                         root_config=arg.root_config))
5701
5702                         self._set_args(greedy_args)
5703                         del greedy_args
5704
5705                         # Revise greedy atoms, accounting for any blockers
5706                         # between selected packages.
5707                         revised_greedy_args = []
5708                         for arg in args:
5709                                 revised_greedy_args.append(arg)
5710                                 if not isinstance(arg, AtomArg):
5711                                         continue
5712                                 for atom in self._greedy_slots(arg.root_config, arg.atom,
5713                                         blocker_lookahead=True):
5714                                         revised_greedy_args.append(
5715                                                 AtomArg(arg=arg.arg, atom=atom,
5716                                                         root_config=arg.root_config))
5717                         args = revised_greedy_args
5718                         del revised_greedy_args
5719
5720                 self._set_args(args)
5721
5722                 myfavorites = set(myfavorites)
5723                 for arg in args:
5724                         if isinstance(arg, (AtomArg, PackageArg)):
5725                                 myfavorites.add(arg.atom)
5726                         elif isinstance(arg, SetArg):
5727                                 myfavorites.add(arg.arg)
5728                 myfavorites = list(myfavorites)
5729
5730                 pprovideddict = pkgsettings.pprovideddict
5731                 if debug:
5732                         portage.writemsg("\n", noiselevel=-1)
5733                 # Order needs to be preserved since a feature of --nodeps
5734                 # is to allow the user to force a specific merge order.
5735                 args.reverse()
5736                 while args:
5737                         arg = args.pop()
5738                         for atom in arg.set:
5739                                 self.spinner.update()
5740                                 dep = Dependency(atom=atom, onlydeps=onlydeps,
5741                                         root=myroot, parent=arg)
5742                                 atom_cp = portage.dep_getkey(atom)
5743                                 try:
5744                                         pprovided = pprovideddict.get(portage.dep_getkey(atom))
5745                                         if pprovided and portage.match_from_list(atom, pprovided):
5746                                                 # A provided package has been specified on the command line.
5747                                                 self._pprovided_args.append((arg, atom))
5748                                                 continue
5749                                         if isinstance(arg, PackageArg):
5750                                                 if not self._add_pkg(arg.package, dep) or \
5751                                                         not self._create_graph():
5752                                                         sys.stderr.write(("\n\n!!! Problem resolving " + \
5753                                                                 "dependencies for %s\n") % arg.arg)
5754                                                         return 0, myfavorites
5755                                                 continue
5756                                         if debug:
5757                                                 portage.writemsg("      Arg: %s\n     Atom: %s\n" % \
5758                                                         (arg, atom), noiselevel=-1)
5759                                         pkg, existing_node = self._select_package(
5760                                                 myroot, atom, onlydeps=onlydeps)
5761                                         if not pkg:
5762                                                 if not (isinstance(arg, SetArg) and \
5763                                                         arg.name in ("system", "world")):
5764                                                         self._unsatisfied_deps_for_display.append(
5765                                                                 ((myroot, atom), {}))
5766                                                         return 0, myfavorites
5767                                                 self._missing_args.append((arg, atom))
5768                                                 continue
5769                                         if atom_cp != pkg.cp:
5770                                                 # For old-style virtuals, we need to repeat the
5771                                                 # package.provided check against the selected package.
5772                                                 expanded_atom = atom.replace(atom_cp, pkg.cp)
5773                                                 pprovided = pprovideddict.get(pkg.cp)
5774                                                 if pprovided and \
5775                                                         portage.match_from_list(expanded_atom, pprovided):
5776                                                         # A provided package has been
5777                                                         # specified on the command line.
5778                                                         self._pprovided_args.append((arg, atom))
5779                                                         continue
5780                                         if pkg.installed and "selective" not in self.myparams:
5781                                                 self._unsatisfied_deps_for_display.append(
5782                                                         ((myroot, atom), {}))
5783                                                 # Previous behavior was to bail out in this case, but
5784                                                 # since the dep is satisfied by the installed package,
5785                                                 # it's more friendly to continue building the graph
5786                                                 # and just show a warning message. Therefore, only bail
5787                                                 # out here if the atom is not from either the system or
5788                                                 # world set.
5789                                                 if not (isinstance(arg, SetArg) and \
5790                                                         arg.name in ("system", "world")):
5791                                                         return 0, myfavorites
5792
5793                                         # Add the selected package to the graph as soon as possible
5794                                         # so that later dep_check() calls can use it as feedback
5795                                         # for making more consistent atom selections.
5796                                         if not self._add_pkg(pkg, dep):
5797                                                 if isinstance(arg, SetArg):
5798                                                         sys.stderr.write(("\n\n!!! Problem resolving " + \
5799                                                                 "dependencies for %s from %s\n") % \
5800                                                                 (atom, arg.arg))
5801                                                 else:
5802                                                         sys.stderr.write(("\n\n!!! Problem resolving " + \
5803                                                                 "dependencies for %s\n") % atom)
5804                                                 return 0, myfavorites
5805
5806                                 except portage.exception.MissingSignature, e:
5807                                         portage.writemsg("\n\n!!! A missing gpg signature is preventing portage from calculating the\n")
5808                                         portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
5809                                         portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
5810                                         portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
5811                                         portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
5812                                         return 0, myfavorites
5813                                 except portage.exception.InvalidSignature, e:
5814                                         portage.writemsg("\n\n!!! An invalid gpg signature is preventing portage from calculating the\n")
5815                                         portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
5816                                         portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
5817                                         portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
5818                                         portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
5819                                         return 0, myfavorites
5820                                 except SystemExit, e:
5821                                         raise # Needed else can't exit
5822                                 except Exception, e:
5823                                         print >> sys.stderr, "\n\n!!! Problem in '%s' dependencies." % atom
5824                                         print >> sys.stderr, "!!!", str(e), getattr(e, "__module__", None)
5825                                         raise
5826
5827                 # Now that the root packages have been added to the graph,
5828                 # process the dependencies.
5829                 if not self._create_graph():
5830                         return 0, myfavorites
5831
5832                 missing=0
5833                 if "--usepkgonly" in self.myopts:
5834                         for xs in self.digraph.all_nodes():
5835                                 if not isinstance(xs, Package):
5836                                         continue
5837                                 if len(xs) >= 4 and xs[0] != "binary" and xs[3] == "merge":
5838                                         if missing == 0:
5839                                                 print
5840                                         missing += 1
5841                                         print "Missing binary for:",xs[2]
5842
5843                 try:
5844                         self.altlist()
5845                 except self._unknown_internal_error:
5846                         return False, myfavorites
5847
5848                 # We're true here unless we are missing binaries.
5849                 return (not missing,myfavorites)
5850
5851         def _set_args(self, args):
5852                 """
5853                 Create the "args" package set from atoms and packages given as
5854                 arguments. This method can be called multiple times if necessary.
5855                 The package selection cache is automatically invalidated, since
5856                 arguments influence package selections.
5857                 """
5858                 args_set = self._sets["args"]
5859                 args_set.clear()
5860                 for arg in args:
5861                         if not isinstance(arg, (AtomArg, PackageArg)):
5862                                 continue
5863                         atom = arg.atom
5864                         if atom in args_set:
5865                                 continue
5866                         args_set.add(atom)
5867
5868                 self._set_atoms.clear()
5869                 self._set_atoms.update(chain(*self._sets.itervalues()))
5870                 atom_arg_map = self._atom_arg_map
5871                 atom_arg_map.clear()
5872                 for arg in args:
5873                         for atom in arg.set:
5874                                 atom_key = (atom, arg.root_config.root)
5875                                 refs = atom_arg_map.get(atom_key)
5876                                 if refs is None:
5877                                         refs = []
5878                                         atom_arg_map[atom_key] = refs
5879                                         if arg not in refs:
5880                                                 refs.append(arg)
5881
5882                 # Invalidate the package selection cache, since
5883                 # arguments influence package selections.
5884                 self._highest_pkg_cache.clear()
5885                 for trees in self._filtered_trees.itervalues():
5886                         trees["porttree"].dbapi._clear_cache()
5887
5888         def _greedy_slots(self, root_config, atom, blocker_lookahead=False):
5889                 """
5890                 Return a list of slot atoms corresponding to installed slots that
5891                 differ from the slot of the highest visible match. When
5892                 blocker_lookahead is True, slot atoms that would trigger a blocker
5893                 conflict are automatically discarded, potentially allowing automatic
5894                 uninstallation of older slots when appropriate.
5895                 """
5896                 highest_pkg, in_graph = self._select_package(root_config.root, atom)
5897                 if highest_pkg is None:
5898                         return []
5899                 vardb = root_config.trees["vartree"].dbapi
5900                 slots = set()
5901                 for cpv in vardb.match(atom):
5902                         # don't mix new virtuals with old virtuals
5903                         if portage.cpv_getkey(cpv) == highest_pkg.cp:
5904                                 slots.add(vardb.aux_get(cpv, ["SLOT"])[0])
5905
5906                 slots.add(highest_pkg.metadata["SLOT"])
5907                 if len(slots) == 1:
5908                         return []
5909                 greedy_pkgs = []
5910                 slots.remove(highest_pkg.metadata["SLOT"])
5911                 while slots:
5912                         slot = slots.pop()
5913                         slot_atom = portage.dep.Atom("%s:%s" % (highest_pkg.cp, slot))
5914                         pkg, in_graph = self._select_package(root_config.root, slot_atom)
5915                         if pkg is not None and \
5916                                 pkg.cp == highest_pkg.cp and pkg < highest_pkg:
5917                                 greedy_pkgs.append(pkg)
5918                 if not greedy_pkgs:
5919                         return []
5920                 if not blocker_lookahead:
5921                         return [pkg.slot_atom for pkg in greedy_pkgs]
5922
5923                 blockers = {}
5924                 blocker_dep_keys = ["DEPEND", "PDEPEND", "RDEPEND"]
5925                 for pkg in greedy_pkgs + [highest_pkg]:
5926                         dep_str = " ".join(pkg.metadata[k] for k in blocker_dep_keys)
5927                         try:
5928                                 atoms = self._select_atoms(
5929                                         pkg.root, dep_str, pkg.use.enabled,
5930                                         parent=pkg, strict=True)
5931                         except portage.exception.InvalidDependString:
5932                                 continue
5933                         blocker_atoms = (x for x in atoms if x.blocker)
5934                         blockers[pkg] = InternalPackageSet(initial_atoms=blocker_atoms)
5935
5936                 if highest_pkg not in blockers:
5937                         return []
5938
5939                 # filter packages with invalid deps
5940                 greedy_pkgs = [pkg for pkg in greedy_pkgs if pkg in blockers]
5941
5942                 # filter packages that conflict with highest_pkg
5943                 greedy_pkgs = [pkg for pkg in greedy_pkgs if not \
5944                         (blockers[highest_pkg].findAtomForPackage(pkg) or \
5945                         blockers[pkg].findAtomForPackage(highest_pkg))]
5946
5947                 if not greedy_pkgs:
5948                         return []
5949
5950                 # If two packages conflict, discard the lower version.
5951                 discard_pkgs = set()
5952                 greedy_pkgs.sort(reverse=True)
5953                 for i in xrange(len(greedy_pkgs) - 1):
5954                         pkg1 = greedy_pkgs[i]
5955                         if pkg1 in discard_pkgs:
5956                                 continue
5957                         for j in xrange(i + 1, len(greedy_pkgs)):
5958                                 pkg2 = greedy_pkgs[j]
5959                                 if pkg2 in discard_pkgs:
5960                                         continue
5961                                 if blockers[pkg1].findAtomForPackage(pkg2) or \
5962                                         blockers[pkg2].findAtomForPackage(pkg1):
5963                                         # pkg1 > pkg2
5964                                         discard_pkgs.add(pkg2)
5965
5966                 return [pkg.slot_atom for pkg in greedy_pkgs \
5967                         if pkg not in discard_pkgs]
5968
5969         def _select_atoms_from_graph(self, *pargs, **kwargs):
5970                 """
5971                 Prefer atoms matching packages that have already been
5972                 added to the graph or those that are installed and have
5973                 not been scheduled for replacement.
5974                 """
5975                 kwargs["trees"] = self._graph_trees
5976                 return self._select_atoms_highest_available(*pargs, **kwargs)
5977
5978         def _select_atoms_highest_available(self, root, depstring,
5979                 myuse=None, parent=None, strict=True, trees=None, priority=None):
5980                 """This will raise InvalidDependString if necessary. If trees is
5981                 None then self._filtered_trees is used."""
5982                 pkgsettings = self.pkgsettings[root]
5983                 if trees is None:
5984                         trees = self._filtered_trees
5985                 if not getattr(priority, "buildtime", False):
5986                         # The parent should only be passed to dep_check() for buildtime
5987                         # dependencies since that's the only case when it's appropriate
5988                         # to trigger the circular dependency avoidance code which uses it.
5989                         # It's important not to trigger the same circular dependency
5990                         # avoidance code for runtime dependencies since it's not needed
5991                         # and it can promote an incorrect package choice.
5992                         parent = None
5993                 if True:
5994                         try:
5995                                 if parent is not None:
5996                                         trees[root]["parent"] = parent
5997                                 if not strict:
5998                                         portage.dep._dep_check_strict = False
5999                                 mycheck = portage.dep_check(depstring, None,
6000                                         pkgsettings, myuse=myuse,
6001                                         myroot=root, trees=trees)
6002                         finally:
6003                                 if parent is not None:
6004                                         trees[root].pop("parent")
6005                                 portage.dep._dep_check_strict = True
6006                         if not mycheck[0]:
6007                                 raise portage.exception.InvalidDependString(mycheck[1])
6008                         selected_atoms = mycheck[1]
6009                 return selected_atoms
6010
6011         def _show_unsatisfied_dep(self, root, atom, myparent=None, arg=None):
6012                 atom = portage.dep.Atom(atom)
6013                 atom_set = InternalPackageSet(initial_atoms=(atom,))
6014                 atom_without_use = atom
6015                 if atom.use:
6016                         atom_without_use = portage.dep.remove_slot(atom)
6017                         if atom.slot:
6018                                 atom_without_use += ":" + atom.slot
6019                         atom_without_use = portage.dep.Atom(atom_without_use)
6020                 xinfo = '"%s"' % atom
6021                 if arg:
6022                         xinfo='"%s"' % arg
6023                 # Discard null/ from failed cpv_expand category expansion.
6024                 xinfo = xinfo.replace("null/", "")
6025                 masked_packages = []
6026                 missing_use = []
6027                 masked_pkg_instances = set()
6028                 missing_licenses = []
6029                 have_eapi_mask = False
6030                 pkgsettings = self.pkgsettings[root]
6031                 implicit_iuse = pkgsettings._get_implicit_iuse()
6032                 root_config = self.roots[root]
6033                 portdb = self.roots[root].trees["porttree"].dbapi
6034                 dbs = self._filtered_trees[root]["dbs"]
6035                 for db, pkg_type, built, installed, db_keys in dbs:
6036                         if installed:
6037                                 continue
6038                         match = db.match
6039                         if hasattr(db, "xmatch"):
6040                                 cpv_list = db.xmatch("match-all", atom_without_use)
6041                         else:
6042                                 cpv_list = db.match(atom_without_use)
6043                         # descending order
6044                         cpv_list.reverse()
6045                         for cpv in cpv_list:
6046                                 metadata, mreasons  = get_mask_info(root_config, cpv,
6047                                         pkgsettings, db, pkg_type, built, installed, db_keys)
6048                                 if metadata is not None:
6049                                         pkg = Package(built=built, cpv=cpv,
6050                                                 installed=installed, metadata=metadata,
6051                                                 root_config=root_config)
6052                                         if pkg.cp != atom.cp:
6053                                                 # A cpv can be returned from dbapi.match() as an
6054                                                 # old-style virtual match even in cases when the
6055                                                 # package does not actually PROVIDE the virtual.
6056                                                 # Filter out any such false matches here.
6057                                                 if not atom_set.findAtomForPackage(pkg):
6058                                                         continue
6059                                         if mreasons:
6060                                                 masked_pkg_instances.add(pkg)
6061                                         if atom.use:
6062                                                 missing_use.append(pkg)
6063                                                 if not mreasons:
6064                                                         continue
6065                                 masked_packages.append(
6066                                         (root_config, pkgsettings, cpv, metadata, mreasons))
6067
6068                 missing_use_reasons = []
6069                 missing_iuse_reasons = []
6070                 for pkg in missing_use:
6071                         use = pkg.use.enabled
6072                         iuse = implicit_iuse.union(re.escape(x) for x in pkg.iuse.all)
6073                         iuse_re = re.compile("^(%s)$" % "|".join(iuse))
6074                         missing_iuse = []
6075                         for x in atom.use.required:
6076                                 if iuse_re.match(x) is None:
6077                                         missing_iuse.append(x)
6078                         mreasons = []
6079                         if missing_iuse:
6080                                 mreasons.append("Missing IUSE: %s" % " ".join(missing_iuse))
6081                                 missing_iuse_reasons.append((pkg, mreasons))
6082                         else:
6083                                 need_enable = sorted(atom.use.enabled.difference(use))
6084                                 need_disable = sorted(atom.use.disabled.intersection(use))
6085                                 if need_enable or need_disable:
6086                                         changes = []
6087                                         changes.extend(colorize("red", "+" + x) \
6088                                                 for x in need_enable)
6089                                         changes.extend(colorize("blue", "-" + x) \
6090                                                 for x in need_disable)
6091                                         mreasons.append("Change USE: %s" % " ".join(changes))
6092                                         missing_use_reasons.append((pkg, mreasons))
6093
6094                 unmasked_use_reasons = [(pkg, mreasons) for (pkg, mreasons) \
6095                         in missing_use_reasons if pkg not in masked_pkg_instances]
6096
6097                 unmasked_iuse_reasons = [(pkg, mreasons) for (pkg, mreasons) \
6098                         in missing_iuse_reasons if pkg not in masked_pkg_instances]
6099
6100                 show_missing_use = False
6101                 if unmasked_use_reasons:
6102                         # Only show the latest version.
6103                         show_missing_use = unmasked_use_reasons[:1]
6104                 elif unmasked_iuse_reasons:
6105                         if missing_use_reasons:
6106                                 # All packages with required IUSE are masked,
6107                                 # so display a normal masking message.
6108                                 pass
6109                         else:
6110                                 show_missing_use = unmasked_iuse_reasons
6111
6112                 if show_missing_use:
6113                         print "\nemerge: there are no ebuilds built with USE flags to satisfy "+green(xinfo)+"."
6114                         print "!!! One of the following packages is required to complete your request:"
6115                         for pkg, mreasons in show_missing_use:
6116                                 print "- "+pkg.cpv+" ("+", ".join(mreasons)+")"
6117
6118                 elif masked_packages:
6119                         print "\n!!! " + \
6120                                 colorize("BAD", "All ebuilds that could satisfy ") + \
6121                                 colorize("INFORM", xinfo) + \
6122                                 colorize("BAD", " have been masked.")
6123                         print "!!! One of the following masked packages is required to complete your request:"
6124                         have_eapi_mask = show_masked_packages(masked_packages)
6125                         if have_eapi_mask:
6126                                 print
6127                                 msg = ("The current version of portage supports " + \
6128                                         "EAPI '%s'. You must upgrade to a newer version" + \
6129                                         " of portage before EAPI masked packages can" + \
6130                                         " be installed.") % portage.const.EAPI
6131                                 from textwrap import wrap
6132                                 for line in wrap(msg, 75):
6133                                         print line
6134                         print
6135                         show_mask_docs()
6136                 else:
6137                         print "\nemerge: there are no ebuilds to satisfy "+green(xinfo)+"."
6138
6139                 # Show parent nodes and the argument that pulled them in.
6140                 traversed_nodes = set()
6141                 node = myparent
6142                 msg = []
6143                 while node is not None:
6144                         traversed_nodes.add(node)
6145                         msg.append('(dependency required by "%s" [%s])' % \
6146                                 (colorize('INFORM', str(node.cpv)), node.type_name))
6147                         # When traversing to parents, prefer arguments over packages
6148                         # since arguments are root nodes. Never traverse the same
6149                         # package twice, in order to prevent an infinite loop.
6150                         selected_parent = None
6151                         for parent in self.digraph.parent_nodes(node):
6152                                 if isinstance(parent, DependencyArg):
6153                                         msg.append('(dependency required by "%s" [argument])' % \
6154                                                 (colorize('INFORM', str(parent))))
6155                                         selected_parent = None
6156                                         break
6157                                 if parent not in traversed_nodes:
6158                                         selected_parent = parent
6159                         node = selected_parent
6160                 for line in msg:
6161                         print line
6162
6163                 print
6164
6165         def _select_pkg_highest_available(self, root, atom, onlydeps=False):
6166                 cache_key = (root, atom, onlydeps)
6167                 ret = self._highest_pkg_cache.get(cache_key)
6168                 if ret is not None:
6169                         pkg, existing = ret
6170                         if pkg and not existing:
6171                                 existing = self._slot_pkg_map[root].get(pkg.slot_atom)
6172                                 if existing and existing == pkg:
6173                                         # Update the cache to reflect that the
6174                                         # package has been added to the graph.
6175                                         ret = pkg, pkg
6176                                         self._highest_pkg_cache[cache_key] = ret
6177                         return ret
6178                 ret = self._select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps)
6179                 self._highest_pkg_cache[cache_key] = ret
6180                 pkg, existing = ret
6181                 if pkg is not None:
6182                         settings = pkg.root_config.settings
6183                         if visible(settings, pkg) and not (pkg.installed and \
6184                                 settings._getMissingKeywords(pkg.cpv, pkg.metadata)):
6185                                 pkg.root_config.visible_pkgs.cpv_inject(pkg)
6186                 return ret
6187
6188         def _select_pkg_highest_available_imp(self, root, atom, onlydeps=False):
6189                 root_config = self.roots[root]
6190                 pkgsettings = self.pkgsettings[root]
6191                 dbs = self._filtered_trees[root]["dbs"]
6192                 vardb = self.roots[root].trees["vartree"].dbapi
6193                 portdb = self.roots[root].trees["porttree"].dbapi
6194                 # List of acceptable packages, ordered by type preference.
6195                 matched_packages = []
6196                 highest_version = None
6197                 if not isinstance(atom, portage.dep.Atom):
6198                         atom = portage.dep.Atom(atom)
6199                 atom_cp = atom.cp
6200                 atom_set = InternalPackageSet(initial_atoms=(atom,))
6201                 existing_node = None
6202                 myeb = None
6203                 usepkgonly = "--usepkgonly" in self.myopts
6204                 empty = "empty" in self.myparams
6205                 selective = "selective" in self.myparams
6206                 reinstall = False
6207                 noreplace = "--noreplace" in self.myopts
6208                 # Behavior of the "selective" parameter depends on
6209                 # whether or not a package matches an argument atom.
6210                 # If an installed package provides an old-style
6211                 # virtual that is no longer provided by an available
6212                 # package, the installed package may match an argument
6213                 # atom even though none of the available packages do.
6214                 # Therefore, "selective" logic does not consider
6215                 # whether or not an installed package matches an
6216                 # argument atom. It only considers whether or not
6217                 # available packages match argument atoms, which is
6218                 # represented by the found_available_arg flag.
6219                 found_available_arg = False
6220                 for find_existing_node in True, False:
6221                         if existing_node:
6222                                 break
6223                         for db, pkg_type, built, installed, db_keys in dbs:
6224                                 if existing_node:
6225                                         break
6226                                 if installed and not find_existing_node:
6227                                         want_reinstall = reinstall or empty or \
6228                                                 (found_available_arg and not selective)
6229                                         if want_reinstall and matched_packages:
6230                                                 continue
6231                                 if hasattr(db, "xmatch"):
6232                                         cpv_list = db.xmatch("match-all", atom)
6233                                 else:
6234                                         cpv_list = db.match(atom)
6235
6236                                 # USE=multislot can make an installed package appear as if
6237                                 # it doesn't satisfy a slot dependency. Rebuilding the ebuild
6238                                 # won't do any good as long as USE=multislot is enabled since
6239                                 # the newly built package still won't have the expected slot.
6240                                 # Therefore, assume that such SLOT dependencies are already
6241                                 # satisfied rather than forcing a rebuild.
6242                                 if installed and not cpv_list and atom.slot:
6243                                         for cpv in db.match(atom.cp):
6244                                                 slot_available = False
6245                                                 for other_db, other_type, other_built, \
6246                                                         other_installed, other_keys in dbs:
6247                                                         try:
6248                                                                 if atom.slot == \
6249                                                                         other_db.aux_get(cpv, ["SLOT"])[0]:
6250                                                                         slot_available = True
6251                                                                         break
6252                                                         except KeyError:
6253                                                                 pass
6254                                                 if not slot_available:
6255                                                         continue
6256                                                 inst_pkg = self._pkg(cpv, "installed",
6257                                                         root_config, installed=installed)
6258                                                 # Remove the slot from the atom and verify that
6259                                                 # the package matches the resulting atom.
6260                                                 atom_without_slot = portage.dep.remove_slot(atom)
6261                                                 if atom.use:
6262                                                         atom_without_slot += str(atom.use)
6263                                                 atom_without_slot = portage.dep.Atom(atom_without_slot)
6264                                                 if portage.match_from_list(
6265                                                         atom_without_slot, [inst_pkg]):
6266                                                         cpv_list = [inst_pkg.cpv]
6267                                                 break
6268
6269                                 if not cpv_list:
6270                                         continue
6271                                 pkg_status = "merge"
6272                                 if installed or onlydeps:
6273                                         pkg_status = "nomerge"
6274                                 # descending order
6275                                 cpv_list.reverse()
6276                                 for cpv in cpv_list:
6277                                         # Make --noreplace take precedence over --newuse.
6278                                         if not installed and noreplace and \
6279                                                 cpv in vardb.match(atom):
6280                                                 # If the installed version is masked, it may
6281                                                 # be necessary to look at lower versions,
6282                                                 # in case there is a visible downgrade.
6283                                                 continue
6284                                         reinstall_for_flags = None
6285                                         cache_key = (pkg_type, root, cpv, pkg_status)
6286                                         calculated_use = True
6287                                         pkg = self._pkg_cache.get(cache_key)
6288                                         if pkg is None:
6289                                                 calculated_use = False
6290                                                 try:
6291                                                         metadata = izip(db_keys, db.aux_get(cpv, db_keys))
6292                                                 except KeyError:
6293                                                         continue
6294                                                 pkg = Package(built=built, cpv=cpv,
6295                                                         installed=installed, metadata=metadata,
6296                                                         onlydeps=onlydeps, root_config=root_config,
6297                                                         type_name=pkg_type)
6298                                                 metadata = pkg.metadata
6299                                                 if not built:
6300                                                         metadata['CHOST'] = pkgsettings.get('CHOST', '')
6301                                                 if not built and ("?" in metadata["LICENSE"] or \
6302                                                         "?" in metadata["PROVIDE"]):
6303                                                         # This is avoided whenever possible because
6304                                                         # it's expensive. It only needs to be done here
6305                                                         # if it has an effect on visibility.
6306                                                         pkgsettings.setcpv(pkg)
6307                                                         metadata["USE"] = pkgsettings["PORTAGE_USE"]
6308                                                         calculated_use = True
6309                                                 self._pkg_cache[pkg] = pkg
6310
6311                                         if not installed or (built and matched_packages):
6312                                                 # Only enforce visibility on installed packages
6313                                                 # if there is at least one other visible package
6314                                                 # available. By filtering installed masked packages
6315                                                 # here, packages that have been masked since they
6316                                                 # were installed can be automatically downgraded
6317                                                 # to an unmasked version.
6318                                                 try:
6319                                                         if not visible(pkgsettings, pkg):
6320                                                                 continue
6321                                                 except portage.exception.InvalidDependString:
6322                                                         if not installed:
6323                                                                 continue
6324
6325                                                 # Enable upgrade or downgrade to a version
6326                                                 # with visible KEYWORDS when the installed
6327                                                 # version is masked by KEYWORDS, but never
6328                                                 # reinstall the same exact version only due
6329                                                 # to a KEYWORDS mask.
6330                                                 if built and matched_packages:
6331
6332                                                         different_version = None
6333                                                         for avail_pkg in matched_packages:
6334                                                                 if not portage.dep.cpvequal(
6335                                                                         pkg.cpv, avail_pkg.cpv):
6336                                                                         different_version = avail_pkg
6337                                                                         break
6338                                                         if different_version is not None:
6339
6340                                                                 if installed and \
6341                                                                         pkgsettings._getMissingKeywords(
6342                                                                         pkg.cpv, pkg.metadata):
6343                                                                         continue
6344
6345                                                                 # If the ebuild no longer exists or it's
6346                                                                 # keywords have been dropped, reject built
6347                                                                 # instances (installed or binary).
6348                                                                 # If --usepkgonly is enabled, assume that
6349                                                                 # the ebuild status should be ignored.
6350                                                                 if not usepkgonly:
6351                                                                         try:
6352                                                                                 pkg_eb = self._pkg(
6353                                                                                         pkg.cpv, "ebuild", root_config)
6354                                                                         except portage.exception.PackageNotFound:
6355                                                                                 continue
6356                                                                         else:
6357                                                                                 if not visible(pkgsettings, pkg_eb):
6358                                                                                         continue
6359
6360                                         if not pkg.built and not calculated_use:
6361                                                 # This is avoided whenever possible because
6362                                                 # it's expensive.
6363                                                 pkgsettings.setcpv(pkg)
6364                                                 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
6365
6366                                         if pkg.cp != atom.cp:
6367                                                 # A cpv can be returned from dbapi.match() as an
6368                                                 # old-style virtual match even in cases when the
6369                                                 # package does not actually PROVIDE the virtual.
6370                                                 # Filter out any such false matches here.
6371                                                 if not atom_set.findAtomForPackage(pkg):
6372                                                         continue
6373
6374                                         myarg = None
6375                                         if root == self.target_root:
6376                                                 try:
6377                                                         # Ebuild USE must have been calculated prior
6378                                                         # to this point, in case atoms have USE deps.
6379                                                         myarg = self._iter_atoms_for_pkg(pkg).next()
6380                                                 except StopIteration:
6381                                                         pass
6382                                                 except portage.exception.InvalidDependString:
6383                                                         if not installed:
6384                                                                 # masked by corruption
6385                                                                 continue
6386                                         if not installed and myarg:
6387                                                 found_available_arg = True
6388
6389                                         if atom.use and not pkg.built:
6390                                                 use = pkg.use.enabled
6391                                                 if atom.use.enabled.difference(use):
6392                                                         continue
6393                                                 if atom.use.disabled.intersection(use):
6394                                                         continue
6395                                         if pkg.cp == atom_cp:
6396                                                 if highest_version is None:
6397                                                         highest_version = pkg
6398                                                 elif pkg > highest_version:
6399                                                         highest_version = pkg
6400                                         # At this point, we've found the highest visible
6401                                         # match from the current repo. Any lower versions
6402                                         # from this repo are ignored, so this so the loop
6403                                         # will always end with a break statement below
6404                                         # this point.
6405                                         if find_existing_node:
6406                                                 e_pkg = self._slot_pkg_map[root].get(pkg.slot_atom)
6407                                                 if not e_pkg:
6408                                                         break
6409                                                 if portage.dep.match_from_list(atom, [e_pkg]):
6410                                                         if highest_version and \
6411                                                                 e_pkg.cp == atom_cp and \
6412                                                                 e_pkg < highest_version and \
6413                                                                 e_pkg.slot_atom != highest_version.slot_atom:
6414                                                                 # There is a higher version available in a
6415                                                                 # different slot, so this existing node is
6416                                                                 # irrelevant.
6417                                                                 pass
6418                                                         else:
6419                                                                 matched_packages.append(e_pkg)
6420                                                                 existing_node = e_pkg
6421                                                 break
6422                                         # Compare built package to current config and
6423                                         # reject the built package if necessary.
6424                                         if built and not installed and \
6425                                                 ("--newuse" in self.myopts or \
6426                                                 "--reinstall" in self.myopts):
6427                                                 iuses = pkg.iuse.all
6428                                                 old_use = pkg.use.enabled
6429                                                 if myeb:
6430                                                         pkgsettings.setcpv(myeb)
6431                                                 else:
6432                                                         pkgsettings.setcpv(pkg)
6433                                                 now_use = pkgsettings["PORTAGE_USE"].split()
6434                                                 forced_flags = set()
6435                                                 forced_flags.update(pkgsettings.useforce)
6436                                                 forced_flags.update(pkgsettings.usemask)
6437                                                 cur_iuse = iuses
6438                                                 if myeb and not usepkgonly:
6439                                                         cur_iuse = myeb.iuse.all
6440                                                 if self._reinstall_for_flags(forced_flags,
6441                                                         old_use, iuses,
6442                                                         now_use, cur_iuse):
6443                                                         break
6444                                         # Compare current config to installed package
6445                                         # and do not reinstall if possible.
6446                                         if not installed and \
6447                                                 ("--newuse" in self.myopts or \
6448                                                 "--reinstall" in self.myopts) and \
6449                                                 cpv in vardb.match(atom):
6450                                                 pkgsettings.setcpv(pkg)
6451                                                 forced_flags = set()
6452                                                 forced_flags.update(pkgsettings.useforce)
6453                                                 forced_flags.update(pkgsettings.usemask)
6454                                                 old_use = vardb.aux_get(cpv, ["USE"])[0].split()
6455                                                 old_iuse = set(filter_iuse_defaults(
6456                                                         vardb.aux_get(cpv, ["IUSE"])[0].split()))
6457                                                 cur_use = pkg.use.enabled
6458                                                 cur_iuse = pkg.iuse.all
6459                                                 reinstall_for_flags = \
6460                                                         self._reinstall_for_flags(
6461                                                         forced_flags, old_use, old_iuse,
6462                                                         cur_use, cur_iuse)
6463                                                 if reinstall_for_flags:
6464                                                         reinstall = True
6465                                         if not built:
6466                                                 myeb = pkg
6467                                         matched_packages.append(pkg)
6468                                         if reinstall_for_flags:
6469                                                 self._reinstall_nodes[pkg] = \
6470                                                         reinstall_for_flags
6471                                         break
6472
6473                 if not matched_packages:
6474                         return None, None
6475
6476                 if "--debug" in self.myopts:
6477                         for pkg in matched_packages:
6478                                 portage.writemsg("%s %s\n" % \
6479                                         ((pkg.type_name + ":").rjust(10), pkg.cpv), noiselevel=-1)
6480
6481                 # Filter out any old-style virtual matches if they are
6482                 # mixed with new-style virtual matches.
6483                 cp = portage.dep_getkey(atom)
6484                 if len(matched_packages) > 1 and \
6485                         "virtual" == portage.catsplit(cp)[0]:
6486                         for pkg in matched_packages:
6487                                 if pkg.cp != cp:
6488                                         continue
6489                                 # Got a new-style virtual, so filter
6490                                 # out any old-style virtuals.
6491                                 matched_packages = [pkg for pkg in matched_packages \
6492                                         if pkg.cp == cp]
6493                                 break
6494
6495                 if len(matched_packages) > 1:
6496                         bestmatch = portage.best(
6497                                 [pkg.cpv for pkg in matched_packages])
6498                         matched_packages = [pkg for pkg in matched_packages \
6499                                 if portage.dep.cpvequal(pkg.cpv, bestmatch)]
6500
6501                 # ordered by type preference ("ebuild" type is the last resort)
6502                 return  matched_packages[-1], existing_node
6503
6504         def _select_pkg_from_graph(self, root, atom, onlydeps=False):
6505                 """
6506                 Select packages that have already been added to the graph or
6507                 those that are installed and have not been scheduled for
6508                 replacement.
6509                 """
6510                 graph_db = self._graph_trees[root]["porttree"].dbapi
6511                 matches = graph_db.match_pkgs(atom)
6512                 if not matches:
6513                         return None, None
6514                 pkg = matches[-1] # highest match
6515                 in_graph = self._slot_pkg_map[root].get(pkg.slot_atom)
6516                 return pkg, in_graph
6517
6518         def _complete_graph(self):
6519                 """
6520                 Add any deep dependencies of required sets (args, system, world) that
6521                 have not been pulled into the graph yet. This ensures that the graph
6522                 is consistent such that initially satisfied deep dependencies are not
6523                 broken in the new graph. Initially unsatisfied dependencies are
6524                 irrelevant since we only want to avoid breaking dependencies that are
6525                 intially satisfied.
6526
6527                 Since this method can consume enough time to disturb users, it is
6528                 currently only enabled by the --complete-graph option.
6529                 """
6530                 if "--buildpkgonly" in self.myopts or \
6531                         "recurse" not in self.myparams:
6532                         return 1
6533
6534                 if "complete" not in self.myparams:
6535                         # Skip this to avoid consuming enough time to disturb users.
6536                         return 1
6537
6538                 # Put the depgraph into a mode that causes it to only
6539                 # select packages that have already been added to the
6540                 # graph or those that are installed and have not been
6541                 # scheduled for replacement. Also, toggle the "deep"
6542                 # parameter so that all dependencies are traversed and
6543                 # accounted for.
6544                 self._select_atoms = self._select_atoms_from_graph
6545                 self._select_package = self._select_pkg_from_graph
6546                 already_deep = "deep" in self.myparams
6547                 if not already_deep:
6548                         self.myparams.add("deep")
6549
6550                 for root in self.roots:
6551                         required_set_names = self._required_set_names.copy()
6552                         if root == self.target_root and \
6553                                 (already_deep or "empty" in self.myparams):
6554                                 required_set_names.difference_update(self._sets)
6555                         if not required_set_names and not self._ignored_deps:
6556                                 continue
6557                         root_config = self.roots[root]
6558                         setconfig = root_config.setconfig
6559                         args = []
6560                         # Reuse existing SetArg instances when available.
6561                         for arg in self.digraph.root_nodes():
6562                                 if not isinstance(arg, SetArg):
6563                                         continue
6564                                 if arg.root_config != root_config:
6565                                         continue
6566                                 if arg.name in required_set_names:
6567                                         args.append(arg)
6568                                         required_set_names.remove(arg.name)
6569                         # Create new SetArg instances only when necessary.
6570                         for s in required_set_names:
6571                                 expanded_set = InternalPackageSet(
6572                                         initial_atoms=setconfig.getSetAtoms(s))
6573                                 atom = SETPREFIX + s
6574                                 args.append(SetArg(arg=atom, set=expanded_set,
6575                                         root_config=root_config))
6576                         vardb = root_config.trees["vartree"].dbapi
6577                         for arg in args:
6578                                 for atom in arg.set:
6579                                         self._dep_stack.append(
6580                                                 Dependency(atom=atom, root=root, parent=arg))
6581                         if self._ignored_deps:
6582                                 self._dep_stack.extend(self._ignored_deps)
6583                                 self._ignored_deps = []
6584                         if not self._create_graph(allow_unsatisfied=True):
6585                                 return 0
6586                         # Check the unsatisfied deps to see if any initially satisfied deps
6587                         # will become unsatisfied due to an upgrade. Initially unsatisfied
6588                         # deps are irrelevant since we only want to avoid breaking deps
6589                         # that are initially satisfied.
6590                         while self._unsatisfied_deps:
6591                                 dep = self._unsatisfied_deps.pop()
6592                                 matches = vardb.match_pkgs(dep.atom)
6593                                 if not matches:
6594                                         self._initially_unsatisfied_deps.append(dep)
6595                                         continue
6596                                 # An scheduled installation broke a deep dependency.
6597                                 # Add the installed package to the graph so that it
6598                                 # will be appropriately reported as a slot collision
6599                                 # (possibly solvable via backtracking).
6600                                 pkg = matches[-1] # highest match
6601                                 if not self._add_pkg(pkg, dep):
6602                                         return 0
6603                                 if not self._create_graph(allow_unsatisfied=True):
6604                                         return 0
6605                 return 1
6606
6607         def _pkg(self, cpv, type_name, root_config, installed=False):
6608                 """
6609                 Get a package instance from the cache, or create a new
6610                 one if necessary. Raises KeyError from aux_get if it
6611                 failures for some reason (package does not exist or is
6612                 corrupt).
6613                 """
6614                 operation = "merge"
6615                 if installed:
6616                         operation = "nomerge"
6617                 pkg = self._pkg_cache.get(
6618                         (type_name, root_config.root, cpv, operation))
6619                 if pkg is None:
6620                         tree_type = self.pkg_tree_map[type_name]
6621                         db = root_config.trees[tree_type].dbapi
6622                         db_keys = list(self._trees_orig[root_config.root][
6623                                 tree_type].dbapi._aux_cache_keys)
6624                         try:
6625                                 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
6626                         except KeyError:
6627                                 raise portage.exception.PackageNotFound(cpv)
6628                         pkg = Package(cpv=cpv, metadata=metadata,
6629                                 root_config=root_config, installed=installed)
6630                         if type_name == "ebuild":
6631                                 settings = self.pkgsettings[root_config.root]
6632                                 settings.setcpv(pkg)
6633                                 pkg.metadata["USE"] = settings["PORTAGE_USE"]
6634                                 pkg.metadata['CHOST'] = settings.get('CHOST', '')
6635                         self._pkg_cache[pkg] = pkg
6636                 return pkg
6637
6638         def validate_blockers(self):
6639                 """Remove any blockers from the digraph that do not match any of the
6640                 packages within the graph.  If necessary, create hard deps to ensure
6641                 correct merge order such that mutually blocking packages are never
6642                 installed simultaneously."""
6643
6644                 if "--buildpkgonly" in self.myopts or \
6645                         "--nodeps" in self.myopts:
6646                         return True
6647
6648                 #if "deep" in self.myparams:
6649                 if True:
6650                         # Pull in blockers from all installed packages that haven't already
6651                         # been pulled into the depgraph.  This is not enabled by default
6652                         # due to the performance penalty that is incurred by all the
6653                         # additional dep_check calls that are required.
6654
6655                         dep_keys = ["DEPEND","RDEPEND","PDEPEND"]
6656                         for myroot in self.trees:
6657                                 vardb = self.trees[myroot]["vartree"].dbapi
6658                                 portdb = self.trees[myroot]["porttree"].dbapi
6659                                 pkgsettings = self.pkgsettings[myroot]
6660                                 final_db = self.mydbapi[myroot]
6661
6662                                 blocker_cache = BlockerCache(myroot, vardb)
6663                                 stale_cache = set(blocker_cache)
6664                                 for pkg in vardb:
6665                                         cpv = pkg.cpv
6666                                         stale_cache.discard(cpv)
6667                                         pkg_in_graph = self.digraph.contains(pkg)
6668
6669                                         # Check for masked installed packages. Only warn about
6670                                         # packages that are in the graph in order to avoid warning
6671                                         # about those that will be automatically uninstalled during
6672                                         # the merge process or by --depclean.
6673                                         if pkg in final_db:
6674                                                 if pkg_in_graph and not visible(pkgsettings, pkg):
6675                                                         self._masked_installed.add(pkg)
6676
6677                                         blocker_atoms = None
6678                                         blockers = None
6679                                         if pkg_in_graph:
6680                                                 blockers = []
6681                                                 try:
6682                                                         blockers.extend(
6683                                                                 self._blocker_parents.child_nodes(pkg))
6684                                                 except KeyError:
6685                                                         pass
6686                                                 try:
6687                                                         blockers.extend(
6688                                                                 self._irrelevant_blockers.child_nodes(pkg))
6689                                                 except KeyError:
6690                                                         pass
6691                                         if blockers is not None:
6692                                                 blockers = set(str(blocker.atom) \
6693                                                         for blocker in blockers)
6694
6695                                         # If this node has any blockers, create a "nomerge"
6696                                         # node for it so that they can be enforced.
6697                                         self.spinner.update()
6698                                         blocker_data = blocker_cache.get(cpv)
6699                                         if blocker_data is not None and \
6700                                                 blocker_data.counter != long(pkg.metadata["COUNTER"]):
6701                                                 blocker_data = None
6702
6703                                         # If blocker data from the graph is available, use
6704                                         # it to validate the cache and update the cache if
6705                                         # it seems invalid.
6706                                         if blocker_data is not None and \
6707                                                 blockers is not None:
6708                                                 if not blockers.symmetric_difference(
6709                                                         blocker_data.atoms):
6710                                                         continue
6711                                                 blocker_data = None
6712
6713                                         if blocker_data is None and \
6714                                                 blockers is not None:
6715                                                 # Re-use the blockers from the graph.
6716                                                 blocker_atoms = sorted(blockers)
6717                                                 counter = long(pkg.metadata["COUNTER"])
6718                                                 blocker_data = \
6719                                                         blocker_cache.BlockerData(counter, blocker_atoms)
6720                                                 blocker_cache[pkg.cpv] = blocker_data
6721                                                 continue
6722
6723                                         if blocker_data:
6724                                                 blocker_atoms = blocker_data.atoms
6725                                         else:
6726                                                 # Use aux_get() to trigger FakeVartree global
6727                                                 # updates on *DEPEND when appropriate.
6728                                                 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
6729                                                 # It is crucial to pass in final_db here in order to
6730                                                 # optimize dep_check calls by eliminating atoms via
6731                                                 # dep_wordreduce and dep_eval calls.
6732                                                 try:
6733                                                         portage.dep._dep_check_strict = False
6734                                                         try:
6735                                                                 success, atoms = portage.dep_check(depstr,
6736                                                                         final_db, pkgsettings, myuse=pkg.use.enabled,
6737                                                                         trees=self._graph_trees, myroot=myroot)
6738                                                         except Exception, e:
6739                                                                 if isinstance(e, SystemExit):
6740                                                                         raise
6741                                                                 # This is helpful, for example, if a ValueError
6742                                                                 # is thrown from cpv_expand due to multiple
6743                                                                 # matches (this can happen if an atom lacks a
6744                                                                 # category).
6745                                                                 show_invalid_depstring_notice(
6746                                                                         pkg, depstr, str(e))
6747                                                                 del e
6748                                                                 raise
6749                                                 finally:
6750                                                         portage.dep._dep_check_strict = True
6751                                                 if not success:
6752                                                         replacement_pkg = final_db.match_pkgs(pkg.slot_atom)
6753                                                         if replacement_pkg and \
6754                                                                 replacement_pkg[0].operation == "merge":
6755                                                                 # This package is being replaced anyway, so
6756                                                                 # ignore invalid dependencies so as not to
6757                                                                 # annoy the user too much (otherwise they'd be
6758                                                                 # forced to manually unmerge it first).
6759                                                                 continue
6760                                                         show_invalid_depstring_notice(pkg, depstr, atoms)
6761                                                         return False
6762                                                 blocker_atoms = [myatom for myatom in atoms \
6763                                                         if myatom.startswith("!")]
6764                                                 blocker_atoms.sort()
6765                                                 counter = long(pkg.metadata["COUNTER"])
6766                                                 blocker_cache[cpv] = \
6767                                                         blocker_cache.BlockerData(counter, blocker_atoms)
6768                                         if blocker_atoms:
6769                                                 try:
6770                                                         for atom in blocker_atoms:
6771                                                                 blocker = Blocker(atom=portage.dep.Atom(atom),
6772                                                                         eapi=pkg.metadata["EAPI"], root=myroot)
6773                                                                 self._blocker_parents.add(blocker, pkg)
6774                                                 except portage.exception.InvalidAtom, e:
6775                                                         depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
6776                                                         show_invalid_depstring_notice(
6777                                                                 pkg, depstr, "Invalid Atom: %s" % (e,))
6778                                                         return False
6779                                 for cpv in stale_cache:
6780                                         del blocker_cache[cpv]
6781                                 blocker_cache.flush()
6782                                 del blocker_cache
6783
6784                 # Discard any "uninstall" tasks scheduled by previous calls
6785                 # to this method, since those tasks may not make sense given
6786                 # the current graph state.
6787                 previous_uninstall_tasks = self._blocker_uninstalls.leaf_nodes()
6788                 if previous_uninstall_tasks:
6789                         self._blocker_uninstalls = digraph()
6790                         self.digraph.difference_update(previous_uninstall_tasks)
6791
6792                 for blocker in self._blocker_parents.leaf_nodes():
6793                         self.spinner.update()
6794                         root_config = self.roots[blocker.root]
6795                         virtuals = root_config.settings.getvirtuals()
6796                         myroot = blocker.root
6797                         initial_db = self.trees[myroot]["vartree"].dbapi
6798                         final_db = self.mydbapi[myroot]
6799                         
6800                         provider_virtual = False
6801                         if blocker.cp in virtuals and \
6802                                 not self._have_new_virt(blocker.root, blocker.cp):
6803                                 provider_virtual = True
6804
6805                         # Use this to check PROVIDE for each matched package
6806                         # when necessary.
6807                         atom_set = InternalPackageSet(
6808                                 initial_atoms=[blocker.atom])
6809
6810                         if provider_virtual:
6811                                 atoms = []
6812                                 for provider_entry in virtuals[blocker.cp]:
6813                                         provider_cp = \
6814                                                 portage.dep_getkey(provider_entry)
6815                                         atoms.append(blocker.atom.replace(
6816                                                 blocker.cp, provider_cp))
6817                         else:
6818                                 atoms = [blocker.atom]
6819
6820                         blocked_initial = set()
6821                         for atom in atoms:
6822                                 for pkg in initial_db.match_pkgs(atom):
6823                                         if atom_set.findAtomForPackage(pkg):
6824                                                 blocked_initial.add(pkg)
6825
6826                         blocked_final = set()
6827                         for atom in atoms:
6828                                 for pkg in final_db.match_pkgs(atom):
6829                                         if atom_set.findAtomForPackage(pkg):
6830                                                 blocked_final.add(pkg)
6831
6832                         if not blocked_initial and not blocked_final:
6833                                 parent_pkgs = self._blocker_parents.parent_nodes(blocker)
6834                                 self._blocker_parents.remove(blocker)
6835                                 # Discard any parents that don't have any more blockers.
6836                                 for pkg in parent_pkgs:
6837                                         self._irrelevant_blockers.add(blocker, pkg)
6838                                         if not self._blocker_parents.child_nodes(pkg):
6839                                                 self._blocker_parents.remove(pkg)
6840                                 continue
6841                         for parent in self._blocker_parents.parent_nodes(blocker):
6842                                 unresolved_blocks = False
6843                                 depends_on_order = set()
6844                                 for pkg in blocked_initial:
6845                                         if pkg.slot_atom == parent.slot_atom:
6846                                                 # TODO: Support blocks within slots in cases where it
6847                                                 # might make sense.  For example, a new version might
6848                                                 # require that the old version be uninstalled at build
6849                                                 # time.
6850                                                 continue
6851                                         if parent.installed:
6852                                                 # Two currently installed packages conflict with
6853                                                 # eachother. Ignore this case since the damage
6854                                                 # is already done and this would be likely to
6855                                                 # confuse users if displayed like a normal blocker.
6856                                                 continue
6857
6858                                         self._blocked_pkgs.add(pkg, blocker)
6859
6860                                         if parent.operation == "merge":
6861                                                 # Maybe the blocked package can be replaced or simply
6862                                                 # unmerged to resolve this block.
6863                                                 depends_on_order.add((pkg, parent))
6864                                                 continue
6865                                         # None of the above blocker resolutions techniques apply,
6866                                         # so apparently this one is unresolvable.
6867                                         unresolved_blocks = True
6868                                 for pkg in blocked_final:
6869                                         if pkg.slot_atom == parent.slot_atom:
6870                                                 # TODO: Support blocks within slots.
6871                                                 continue
6872                                         if parent.operation == "nomerge" and \
6873                                                 pkg.operation == "nomerge":
6874                                                 # This blocker will be handled the next time that a
6875                                                 # merge of either package is triggered.
6876                                                 continue
6877
6878                                         self._blocked_pkgs.add(pkg, blocker)
6879
6880                                         # Maybe the blocking package can be
6881                                         # unmerged to resolve this block.
6882                                         if parent.operation == "merge" and pkg.installed:
6883                                                 depends_on_order.add((pkg, parent))
6884                                                 continue
6885                                         elif parent.operation == "nomerge":
6886                                                 depends_on_order.add((parent, pkg))
6887                                                 continue
6888                                         # None of the above blocker resolutions techniques apply,
6889                                         # so apparently this one is unresolvable.
6890                                         unresolved_blocks = True
6891
6892                                 # Make sure we don't unmerge any package that have been pulled
6893                                 # into the graph.
6894                                 if not unresolved_blocks and depends_on_order:
6895                                         for inst_pkg, inst_task in depends_on_order:
6896                                                 if self.digraph.contains(inst_pkg) and \
6897                                                         self.digraph.parent_nodes(inst_pkg):
6898                                                         unresolved_blocks = True
6899                                                         break
6900
6901                                 if not unresolved_blocks and depends_on_order:
6902                                         for inst_pkg, inst_task in depends_on_order:
6903                                                 uninst_task = Package(built=inst_pkg.built,
6904                                                         cpv=inst_pkg.cpv, installed=inst_pkg.installed,
6905                                                         metadata=inst_pkg.metadata,
6906                                                         operation="uninstall",
6907                                                         root_config=inst_pkg.root_config,
6908                                                         type_name=inst_pkg.type_name)
6909                                                 self._pkg_cache[uninst_task] = uninst_task
6910                                                 # Enforce correct merge order with a hard dep.
6911                                                 self.digraph.addnode(uninst_task, inst_task,
6912                                                         priority=BlockerDepPriority.instance)
6913                                                 # Count references to this blocker so that it can be
6914                                                 # invalidated after nodes referencing it have been
6915                                                 # merged.
6916                                                 self._blocker_uninstalls.addnode(uninst_task, blocker)
6917                                 if not unresolved_blocks and not depends_on_order:
6918                                         self._irrelevant_blockers.add(blocker, parent)
6919                                         self._blocker_parents.remove_edge(blocker, parent)
6920                                         if not self._blocker_parents.parent_nodes(blocker):
6921                                                 self._blocker_parents.remove(blocker)
6922                                         if not self._blocker_parents.child_nodes(parent):
6923                                                 self._blocker_parents.remove(parent)
6924                                 if unresolved_blocks:
6925                                         self._unsolvable_blockers.add(blocker, parent)
6926
6927                 return True
6928
6929         def _accept_blocker_conflicts(self):
6930                 acceptable = False
6931                 for x in ("--buildpkgonly", "--fetchonly",
6932                         "--fetch-all-uri", "--nodeps"):
6933                         if x in self.myopts:
6934                                 acceptable = True
6935                                 break
6936                 return acceptable
6937
6938         def _merge_order_bias(self, mygraph):
6939                 """
6940                 For optimal leaf node selection, promote deep system runtime deps and
6941                 order nodes from highest to lowest overall reference count.
6942                 """
6943
6944                 node_info = {}
6945                 for node in mygraph.order:
6946                         node_info[node] = len(mygraph.parent_nodes(node))
6947                 deep_system_deps = _find_deep_system_runtime_deps(mygraph)
6948
6949                 def cmp_merge_preference(node1, node2):
6950
6951                         if node1.operation == 'uninstall':
6952                                 if node2.operation == 'uninstall':
6953                                         return 0
6954                                 return 1
6955
6956                         if node2.operation == 'uninstall':
6957                                 if node1.operation == 'uninstall':
6958                                         return 0
6959                                 return -1
6960
6961                         node1_sys = node1 in deep_system_deps
6962                         node2_sys = node2 in deep_system_deps
6963                         if node1_sys != node2_sys:
6964                                 if node1_sys:
6965                                         return -1
6966                                 return 1
6967
6968                         return node_info[node2] - node_info[node1]
6969
6970                 mygraph.order.sort(key=cmp_sort_key(cmp_merge_preference))
6971
6972         def altlist(self, reversed=False):
6973
6974                 while self._serialized_tasks_cache is None:
6975                         self._resolve_conflicts()
6976                         try:
6977                                 self._serialized_tasks_cache, self._scheduler_graph = \
6978                                         self._serialize_tasks()
6979                         except self._serialize_tasks_retry:
6980                                 pass
6981
6982                 retlist = self._serialized_tasks_cache[:]
6983                 if reversed:
6984                         retlist.reverse()
6985                 return retlist
6986
6987         def schedulerGraph(self):
6988                 """
6989                 The scheduler graph is identical to the normal one except that
6990                 uninstall edges are reversed in specific cases that require
6991                 conflicting packages to be temporarily installed simultaneously.
6992                 This is intended for use by the Scheduler in it's parallelization
6993                 logic. It ensures that temporary simultaneous installation of
6994                 conflicting packages is avoided when appropriate (especially for
6995                 !!atom blockers), but allowed in specific cases that require it.
6996
6997                 Note that this method calls break_refs() which alters the state of
6998                 internal Package instances such that this depgraph instance should
6999                 not be used to perform any more calculations.
7000                 """
7001                 if self._scheduler_graph is None:
7002                         self.altlist()
7003                 self.break_refs(self._scheduler_graph.order)
7004                 return self._scheduler_graph
7005
7006         def break_refs(self, nodes):
7007                 """
7008                 Take a mergelist like that returned from self.altlist() and
7009                 break any references that lead back to the depgraph. This is
7010                 useful if you want to hold references to packages without
7011                 also holding the depgraph on the heap.
7012                 """
7013                 for node in nodes:
7014                         if hasattr(node, "root_config"):
7015                                 # The FakeVartree references the _package_cache which
7016                                 # references the depgraph. So that Package instances don't
7017                                 # hold the depgraph and FakeVartree on the heap, replace
7018                                 # the RootConfig that references the FakeVartree with the
7019                                 # original RootConfig instance which references the actual
7020                                 # vartree.
7021                                 node.root_config = \
7022                                         self._trees_orig[node.root_config.root]["root_config"]
7023
7024         def _resolve_conflicts(self):
7025                 if not self._complete_graph():
7026                         raise self._unknown_internal_error()
7027
7028                 if not self.validate_blockers():
7029                         raise self._unknown_internal_error()
7030
7031                 if self._slot_collision_info:
7032                         self._process_slot_conflicts()
7033
7034         def _serialize_tasks(self):
7035
7036                 if "--debug" in self.myopts:
7037                         writemsg("\ndigraph:\n\n", noiselevel=-1)
7038                         self.digraph.debug_print()
7039                         writemsg("\n", noiselevel=-1)
7040
7041                 scheduler_graph = self.digraph.copy()
7042                 mygraph=self.digraph.copy()
7043                 # Prune "nomerge" root nodes if nothing depends on them, since
7044                 # otherwise they slow down merge order calculation. Don't remove
7045                 # non-root nodes since they help optimize merge order in some cases
7046                 # such as revdep-rebuild.
7047                 removed_nodes = set()
7048                 while True:
7049                         for node in mygraph.root_nodes():
7050                                 if not isinstance(node, Package) or \
7051                                         node.installed or node.onlydeps:
7052                                         removed_nodes.add(node)
7053                         if removed_nodes:
7054                                 self.spinner.update()
7055                                 mygraph.difference_update(removed_nodes)
7056                         if not removed_nodes:
7057                                 break
7058                         removed_nodes.clear()
7059                 self._merge_order_bias(mygraph)
7060                 def cmp_circular_bias(n1, n2):
7061                         """
7062                         RDEPEND is stronger than PDEPEND and this function
7063                         measures such a strength bias within a circular
7064                         dependency relationship.
7065                         """
7066                         n1_n2_medium = n2 in mygraph.child_nodes(n1,
7067                                 ignore_priority=priority_range.ignore_medium_soft)
7068                         n2_n1_medium = n1 in mygraph.child_nodes(n2,
7069                                 ignore_priority=priority_range.ignore_medium_soft)
7070                         if n1_n2_medium == n2_n1_medium:
7071                                 return 0
7072                         elif n1_n2_medium:
7073                                 return 1
7074                         return -1
7075                 myblocker_uninstalls = self._blocker_uninstalls.copy()
7076                 retlist=[]
7077                 # Contains uninstall tasks that have been scheduled to
7078                 # occur after overlapping blockers have been installed.
7079                 scheduled_uninstalls = set()
7080                 # Contains any Uninstall tasks that have been ignored
7081                 # in order to avoid the circular deps code path. These
7082                 # correspond to blocker conflicts that could not be
7083                 # resolved.
7084                 ignored_uninstall_tasks = set()
7085                 have_uninstall_task = False
7086                 complete = "complete" in self.myparams
7087                 asap_nodes = []
7088
7089                 def get_nodes(**kwargs):
7090                         """
7091                         Returns leaf nodes excluding Uninstall instances
7092                         since those should be executed as late as possible.
7093                         """
7094                         return [node for node in mygraph.leaf_nodes(**kwargs) \
7095                                 if isinstance(node, Package) and \
7096                                         (node.operation != "uninstall" or \
7097                                         node in scheduled_uninstalls)]
7098
7099                 # sys-apps/portage needs special treatment if ROOT="/"
7100                 running_root = self._running_root.root
7101                 from portage.const import PORTAGE_PACKAGE_ATOM
7102                 runtime_deps = InternalPackageSet(
7103                         initial_atoms=[PORTAGE_PACKAGE_ATOM])
7104                 running_portage = self.trees[running_root]["vartree"].dbapi.match_pkgs(
7105                         PORTAGE_PACKAGE_ATOM)
7106                 replacement_portage = self.mydbapi[running_root].match_pkgs(
7107                         PORTAGE_PACKAGE_ATOM)
7108
7109                 if running_portage:
7110                         running_portage = running_portage[0]
7111                 else:
7112                         running_portage = None
7113
7114                 if replacement_portage:
7115                         replacement_portage = replacement_portage[0]
7116                 else:
7117                         replacement_portage = None
7118
7119                 if replacement_portage == running_portage:
7120                         replacement_portage = None
7121
7122                 if replacement_portage is not None:
7123                         # update from running_portage to replacement_portage asap
7124                         asap_nodes.append(replacement_portage)
7125
7126                 if running_portage is not None:
7127                         try:
7128                                 portage_rdepend = self._select_atoms_highest_available(
7129                                         running_root, running_portage.metadata["RDEPEND"],
7130                                         myuse=running_portage.use.enabled,
7131                                         parent=running_portage, strict=False)
7132                         except portage.exception.InvalidDependString, e:
7133                                 portage.writemsg("!!! Invalid RDEPEND in " + \
7134                                         "'%svar/db/pkg/%s/RDEPEND': %s\n" % \
7135                                         (running_root, running_portage.cpv, e), noiselevel=-1)
7136                                 del e
7137                                 portage_rdepend = []
7138                         runtime_deps.update(atom for atom in portage_rdepend \
7139                                 if not atom.startswith("!"))
7140
7141                 def gather_deps(ignore_priority, mergeable_nodes,
7142                         selected_nodes, node):
7143                         """
7144                         Recursively gather a group of nodes that RDEPEND on
7145                         eachother. This ensures that they are merged as a group
7146                         and get their RDEPENDs satisfied as soon as possible.
7147                         """
7148                         if node in selected_nodes:
7149                                 return True
7150                         if node not in mergeable_nodes:
7151                                 return False
7152                         if node == replacement_portage and \
7153                                 mygraph.child_nodes(node,
7154                                 ignore_priority=priority_range.ignore_medium_soft):
7155                                 # Make sure that portage always has all of it's
7156                                 # RDEPENDs installed first.
7157                                 return False
7158                         selected_nodes.add(node)
7159                         for child in mygraph.child_nodes(node,
7160                                 ignore_priority=ignore_priority):
7161                                 if not gather_deps(ignore_priority,
7162                                         mergeable_nodes, selected_nodes, child):
7163                                         return False
7164                         return True
7165
7166                 def ignore_uninst_or_med(priority):
7167                         if priority is BlockerDepPriority.instance:
7168                                 return True
7169                         return priority_range.ignore_medium(priority)
7170
7171                 def ignore_uninst_or_med_soft(priority):
7172                         if priority is BlockerDepPriority.instance:
7173                                 return True
7174                         return priority_range.ignore_medium_soft(priority)
7175
7176                 tree_mode = "--tree" in self.myopts
7177                 # Tracks whether or not the current iteration should prefer asap_nodes
7178                 # if available.  This is set to False when the previous iteration
7179                 # failed to select any nodes.  It is reset whenever nodes are
7180                 # successfully selected.
7181                 prefer_asap = True
7182
7183                 # Controls whether or not the current iteration should drop edges that
7184                 # are "satisfied" by installed packages, in order to solve circular
7185                 # dependencies. The deep runtime dependencies of installed packages are
7186                 # not checked in this case (bug #199856), so it must be avoided
7187                 # whenever possible.
7188                 drop_satisfied = False
7189
7190                 # State of variables for successive iterations that loosen the
7191                 # criteria for node selection.
7192                 #
7193                 # iteration   prefer_asap   drop_satisfied
7194                 # 1           True          False
7195                 # 2           False         False
7196                 # 3           False         True
7197                 #
7198                 # If no nodes are selected on the last iteration, it is due to
7199                 # unresolved blockers or circular dependencies.
7200
7201                 while not mygraph.empty():
7202                         self.spinner.update()
7203                         selected_nodes = None
7204                         ignore_priority = None
7205                         if drop_satisfied or (prefer_asap and asap_nodes):
7206                                 priority_range = DepPrioritySatisfiedRange
7207                         else:
7208                                 priority_range = DepPriorityNormalRange
7209                         if prefer_asap and asap_nodes:
7210                                 # ASAP nodes are merged before their soft deps. Go ahead and
7211                                 # select root nodes here if necessary, since it's typical for
7212                                 # the parent to have been removed from the graph already.
7213                                 asap_nodes = [node for node in asap_nodes \
7214                                         if mygraph.contains(node)]
7215                                 for node in asap_nodes:
7216                                         if not mygraph.child_nodes(node,
7217                                                 ignore_priority=priority_range.ignore_soft):
7218                                                 selected_nodes = [node]
7219                                                 asap_nodes.remove(node)
7220                                                 break
7221                         if not selected_nodes and \
7222                                 not (prefer_asap and asap_nodes):
7223                                 for i in xrange(priority_range.NONE,
7224                                         priority_range.MEDIUM_SOFT + 1):
7225                                         ignore_priority = priority_range.ignore_priority[i]
7226                                         nodes = get_nodes(ignore_priority=ignore_priority)
7227                                         if nodes:
7228                                                 # If there is a mix of uninstall nodes with other
7229                                                 # types, save the uninstall nodes for later since
7230                                                 # sometimes a merge node will render an uninstall
7231                                                 # node unnecessary (due to occupying the same slot),
7232                                                 # and we want to avoid executing a separate uninstall
7233                                                 # task in that case.
7234                                                 if len(nodes) > 1:
7235                                                         good_uninstalls = []
7236                                                         with_some_uninstalls_excluded = []
7237                                                         for node in nodes:
7238                                                                 if node.operation == "uninstall":
7239                                                                         slot_node = self.mydbapi[node.root
7240                                                                                 ].match_pkgs(node.slot_atom)
7241                                                                         if slot_node and \
7242                                                                                 slot_node[0].operation == "merge":
7243                                                                                 continue
7244                                                                         good_uninstalls.append(node)
7245                                                                 with_some_uninstalls_excluded.append(node)
7246                                                         if good_uninstalls:
7247                                                                 nodes = good_uninstalls
7248                                                         elif with_some_uninstalls_excluded:
7249                                                                 nodes = with_some_uninstalls_excluded
7250                                                         else:
7251                                                                 nodes = nodes
7252
7253                                                 if ignore_priority is None and not tree_mode:
7254                                                         # Greedily pop all of these nodes since no
7255                                                         # relationship has been ignored. This optimization
7256                                                         # destroys --tree output, so it's disabled in tree
7257                                                         # mode.
7258                                                         selected_nodes = nodes
7259                                                 else:
7260                                                         # For optimal merge order:
7261                                                         #  * Only pop one node.
7262                                                         #  * Removing a root node (node without a parent)
7263                                                         #    will not produce a leaf node, so avoid it.
7264                                                         #  * It's normal for a selected uninstall to be a
7265                                                         #    root node, so don't check them for parents.
7266                                                         for node in nodes:
7267                                                                 if node.operation == "uninstall" or \
7268                                                                         mygraph.parent_nodes(node):
7269                                                                         selected_nodes = [node]
7270                                                                         break
7271
7272                                                 if selected_nodes:
7273                                                         break
7274
7275                         if not selected_nodes:
7276                                 nodes = get_nodes(ignore_priority=priority_range.ignore_medium)
7277                                 if nodes:
7278                                         mergeable_nodes = set(nodes)
7279                                         if prefer_asap and asap_nodes:
7280                                                 nodes = asap_nodes
7281                                         for i in xrange(priority_range.SOFT,
7282                                                 priority_range.MEDIUM_SOFT + 1):
7283                                                 ignore_priority = priority_range.ignore_priority[i]
7284                                                 for node in nodes:
7285                                                         if not mygraph.parent_nodes(node):
7286                                                                 continue
7287                                                         selected_nodes = set()
7288                                                         if gather_deps(ignore_priority,
7289                                                                 mergeable_nodes, selected_nodes, node):
7290                                                                 break
7291                                                         else:
7292                                                                 selected_nodes = None
7293                                                 if selected_nodes:
7294                                                         break
7295
7296                                         if prefer_asap and asap_nodes and not selected_nodes:
7297                                                 # We failed to find any asap nodes to merge, so ignore
7298                                                 # them for the next iteration.
7299                                                 prefer_asap = False
7300                                                 continue
7301
7302                         if selected_nodes and ignore_priority is not None:
7303                                 # Try to merge ignored medium_soft deps as soon as possible
7304                                 # if they're not satisfied by installed packages.
7305                                 for node in selected_nodes:
7306                                         children = set(mygraph.child_nodes(node))
7307                                         soft = children.difference(
7308                                                 mygraph.child_nodes(node,
7309                                                 ignore_priority=DepPrioritySatisfiedRange.ignore_soft))
7310                                         medium_soft = children.difference(
7311                                                 mygraph.child_nodes(node,
7312                                                         ignore_priority = \
7313                                                         DepPrioritySatisfiedRange.ignore_medium_soft))
7314                                         medium_soft.difference_update(soft)
7315                                         for child in medium_soft:
7316                                                 if child in selected_nodes:
7317                                                         continue
7318                                                 if child in asap_nodes:
7319                                                         continue
7320                                                 asap_nodes.append(child)
7321
7322                         if selected_nodes and len(selected_nodes) > 1:
7323                                 if not isinstance(selected_nodes, list):
7324                                         selected_nodes = list(selected_nodes)
7325                                 selected_nodes.sort(key=cmp_sort_key(cmp_circular_bias))
7326
7327                         if not selected_nodes and not myblocker_uninstalls.is_empty():
7328                                 # An Uninstall task needs to be executed in order to
7329                                 # avoid conflict if possible.
7330
7331                                 if drop_satisfied:
7332                                         priority_range = DepPrioritySatisfiedRange
7333                                 else:
7334                                         priority_range = DepPriorityNormalRange
7335
7336                                 mergeable_nodes = get_nodes(
7337                                         ignore_priority=ignore_uninst_or_med)
7338
7339                                 min_parent_deps = None
7340                                 uninst_task = None
7341                                 for task in myblocker_uninstalls.leaf_nodes():
7342                                         # Do some sanity checks so that system or world packages
7343                                         # don't get uninstalled inappropriately here (only really
7344                                         # necessary when --complete-graph has not been enabled).
7345
7346                                         if task in ignored_uninstall_tasks:
7347                                                 continue
7348
7349                                         if task in scheduled_uninstalls:
7350                                                 # It's been scheduled but it hasn't
7351                                                 # been executed yet due to dependence
7352                                                 # on installation of blocking packages.
7353                                                 continue
7354
7355                                         root_config = self.roots[task.root]
7356                                         inst_pkg = self._pkg_cache[
7357                                                 ("installed", task.root, task.cpv, "nomerge")]
7358
7359                                         if self.digraph.contains(inst_pkg):
7360                                                 continue
7361
7362                                         forbid_overlap = False
7363                                         heuristic_overlap = False
7364                                         for blocker in myblocker_uninstalls.parent_nodes(task):
7365                                                 if blocker.eapi in ("0", "1"):
7366                                                         heuristic_overlap = True
7367                                                 elif blocker.atom.blocker.overlap.forbid:
7368                                                         forbid_overlap = True
7369                                                         break
7370                                         if forbid_overlap and running_root == task.root:
7371                                                 continue
7372
7373                                         if heuristic_overlap and running_root == task.root:
7374                                                 # Never uninstall sys-apps/portage or it's essential
7375                                                 # dependencies, except through replacement.
7376                                                 try:
7377                                                         runtime_dep_atoms = \
7378                                                                 list(runtime_deps.iterAtomsForPackage(task))
7379                                                 except portage.exception.InvalidDependString, e:
7380                                                         portage.writemsg("!!! Invalid PROVIDE in " + \
7381                                                                 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
7382                                                                 (task.root, task.cpv, e), noiselevel=-1)
7383                                                         del e
7384                                                         continue
7385
7386                                                 # Don't uninstall a runtime dep if it appears
7387                                                 # to be the only suitable one installed.
7388                                                 skip = False
7389                                                 vardb = root_config.trees["vartree"].dbapi
7390                                                 for atom in runtime_dep_atoms:
7391                                                         other_version = None
7392                                                         for pkg in vardb.match_pkgs(atom):
7393                                                                 if pkg.cpv == task.cpv and \
7394                                                                         pkg.metadata["COUNTER"] == \
7395                                                                         task.metadata["COUNTER"]:
7396                                                                         continue
7397                                                                 other_version = pkg
7398                                                                 break
7399                                                         if other_version is None:
7400                                                                 skip = True
7401                                                                 break
7402                                                 if skip:
7403                                                         continue
7404
7405                                                 # For packages in the system set, don't take
7406                                                 # any chances. If the conflict can't be resolved
7407                                                 # by a normal replacement operation then abort.
7408                                                 skip = False
7409                                                 try:
7410                                                         for atom in root_config.sets[
7411                                                                 "system"].iterAtomsForPackage(task):
7412                                                                 skip = True
7413                                                                 break
7414                                                 except portage.exception.InvalidDependString, e:
7415                                                         portage.writemsg("!!! Invalid PROVIDE in " + \
7416                                                                 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
7417                                                                 (task.root, task.cpv, e), noiselevel=-1)
7418                                                         del e
7419                                                         skip = True
7420                                                 if skip:
7421                                                         continue
7422
7423                                         # Note that the world check isn't always
7424                                         # necessary since self._complete_graph() will
7425                                         # add all packages from the system and world sets to the
7426                                         # graph. This just allows unresolved conflicts to be
7427                                         # detected as early as possible, which makes it possible
7428                                         # to avoid calling self._complete_graph() when it is
7429                                         # unnecessary due to blockers triggering an abortion.
7430                                         if not complete:
7431                                                 # For packages in the world set, go ahead an uninstall
7432                                                 # when necessary, as long as the atom will be satisfied
7433                                                 # in the final state.
7434                                                 graph_db = self.mydbapi[task.root]
7435                                                 skip = False
7436                                                 try:
7437                                                         for atom in root_config.sets[
7438                                                                 "world"].iterAtomsForPackage(task):
7439                                                                 satisfied = False
7440                                                                 for pkg in graph_db.match_pkgs(atom):
7441                                                                         if pkg == inst_pkg:
7442                                                                                 continue
7443                                                                         satisfied = True
7444                                                                         break
7445                                                                 if not satisfied:
7446                                                                         skip = True
7447                                                                         self._blocked_world_pkgs[inst_pkg] = atom
7448                                                                         break
7449                                                 except portage.exception.InvalidDependString, e:
7450                                                         portage.writemsg("!!! Invalid PROVIDE in " + \
7451                                                                 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
7452                                                                 (task.root, task.cpv, e), noiselevel=-1)
7453                                                         del e
7454                                                         skip = True
7455                                                 if skip:
7456                                                         continue
7457
7458                                         # Check the deps of parent nodes to ensure that
7459                                         # the chosen task produces a leaf node. Maybe
7460                                         # this can be optimized some more to make the
7461                                         # best possible choice, but the current algorithm
7462                                         # is simple and should be near optimal for most
7463                                         # common cases.
7464                                         mergeable_parent = False
7465                                         parent_deps = set()
7466                                         for parent in mygraph.parent_nodes(task):
7467                                                 parent_deps.update(mygraph.child_nodes(parent,
7468                                                         ignore_priority=priority_range.ignore_medium_soft))
7469                                                 if parent in mergeable_nodes and \
7470                                                         gather_deps(ignore_uninst_or_med_soft,
7471                                                         mergeable_nodes, set(), parent):
7472                                                         mergeable_parent = True
7473
7474                                         if not mergeable_parent:
7475                                                 continue
7476
7477                                         parent_deps.remove(task)
7478                                         if min_parent_deps is None or \
7479                                                 len(parent_deps) < min_parent_deps:
7480                                                 min_parent_deps = len(parent_deps)
7481                                                 uninst_task = task
7482
7483                                 if uninst_task is not None:
7484                                         # The uninstall is performed only after blocking
7485                                         # packages have been merged on top of it. File
7486                                         # collisions between blocking packages are detected
7487                                         # and removed from the list of files to be uninstalled.
7488                                         scheduled_uninstalls.add(uninst_task)
7489                                         parent_nodes = mygraph.parent_nodes(uninst_task)
7490
7491                                         # Reverse the parent -> uninstall edges since we want
7492                                         # to do the uninstall after blocking packages have
7493                                         # been merged on top of it.
7494                                         mygraph.remove(uninst_task)
7495                                         for blocked_pkg in parent_nodes:
7496                                                 mygraph.add(blocked_pkg, uninst_task,
7497                                                         priority=BlockerDepPriority.instance)
7498                                                 scheduler_graph.remove_edge(uninst_task, blocked_pkg)
7499                                                 scheduler_graph.add(blocked_pkg, uninst_task,
7500                                                         priority=BlockerDepPriority.instance)
7501
7502                                         # Reset the state variables for leaf node selection and
7503                                         # continue trying to select leaf nodes.
7504                                         prefer_asap = True
7505                                         drop_satisfied = False
7506                                         continue
7507
7508                         if not selected_nodes:
7509                                 # Only select root nodes as a last resort. This case should
7510                                 # only trigger when the graph is nearly empty and the only
7511                                 # remaining nodes are isolated (no parents or children). Since
7512                                 # the nodes must be isolated, ignore_priority is not needed.
7513                                 selected_nodes = get_nodes()
7514
7515                         if not selected_nodes and not drop_satisfied:
7516                                 drop_satisfied = True
7517                                 continue
7518
7519                         if not selected_nodes and not myblocker_uninstalls.is_empty():
7520                                 # If possible, drop an uninstall task here in order to avoid
7521                                 # the circular deps code path. The corresponding blocker will
7522                                 # still be counted as an unresolved conflict.
7523                                 uninst_task = None
7524                                 for node in myblocker_uninstalls.leaf_nodes():
7525                                         try:
7526                                                 mygraph.remove(node)
7527                                         except KeyError:
7528                                                 pass
7529                                         else:
7530                                                 uninst_task = node
7531                                                 ignored_uninstall_tasks.add(node)
7532                                                 break
7533
7534                                 if uninst_task is not None:
7535                                         # Reset the state variables for leaf node selection and
7536                                         # continue trying to select leaf nodes.
7537                                         prefer_asap = True
7538                                         drop_satisfied = False
7539                                         continue
7540
7541                         if not selected_nodes:
7542                                 self._circular_deps_for_display = mygraph
7543                                 raise self._unknown_internal_error()
7544
7545                         # At this point, we've succeeded in selecting one or more nodes, so
7546                         # reset state variables for leaf node selection.
7547                         prefer_asap = True
7548                         drop_satisfied = False
7549
7550                         mygraph.difference_update(selected_nodes)
7551
7552                         for node in selected_nodes:
7553                                 if isinstance(node, Package) and \
7554                                         node.operation == "nomerge":
7555                                         continue
7556
7557                                 # Handle interactions between blockers
7558                                 # and uninstallation tasks.
7559                                 solved_blockers = set()
7560                                 uninst_task = None
7561                                 if isinstance(node, Package) and \
7562                                         "uninstall" == node.operation:
7563                                         have_uninstall_task = True
7564                                         uninst_task = node
7565                                 else:
7566                                         vardb = self.trees[node.root]["vartree"].dbapi
7567                                         previous_cpv = vardb.match(node.slot_atom)
7568                                         if previous_cpv:
7569                                                 # The package will be replaced by this one, so remove
7570                                                 # the corresponding Uninstall task if necessary.
7571                                                 previous_cpv = previous_cpv[0]
7572                                                 uninst_task = \
7573                                                         ("installed", node.root, previous_cpv, "uninstall")
7574                                                 try:
7575                                                         mygraph.remove(uninst_task)
7576                                                 except KeyError:
7577                                                         pass
7578
7579                                 if uninst_task is not None and \
7580                                         uninst_task not in ignored_uninstall_tasks and \
7581                                         myblocker_uninstalls.contains(uninst_task):
7582                                         blocker_nodes = myblocker_uninstalls.parent_nodes(uninst_task)
7583                                         myblocker_uninstalls.remove(uninst_task)
7584                                         # Discard any blockers that this Uninstall solves.
7585                                         for blocker in blocker_nodes:
7586                                                 if not myblocker_uninstalls.child_nodes(blocker):
7587                                                         myblocker_uninstalls.remove(blocker)
7588                                                         solved_blockers.add(blocker)
7589
7590                                 retlist.append(node)
7591
7592                                 if (isinstance(node, Package) and \
7593                                         "uninstall" == node.operation) or \
7594                                         (uninst_task is not None and \
7595                                         uninst_task in scheduled_uninstalls):
7596                                         # Include satisfied blockers in the merge list
7597                                         # since the user might be interested and also
7598                                         # it serves as an indicator that blocking packages
7599                                         # will be temporarily installed simultaneously.
7600                                         for blocker in solved_blockers:
7601                                                 retlist.append(Blocker(atom=blocker.atom,
7602                                                         root=blocker.root, eapi=blocker.eapi,
7603                                                         satisfied=True))
7604
7605                 unsolvable_blockers = set(self._unsolvable_blockers.leaf_nodes())
7606                 for node in myblocker_uninstalls.root_nodes():
7607                         unsolvable_blockers.add(node)
7608
7609                 for blocker in unsolvable_blockers:
7610                         retlist.append(blocker)
7611
7612                 # If any Uninstall tasks need to be executed in order
7613                 # to avoid a conflict, complete the graph with any
7614                 # dependencies that may have been initially
7615                 # neglected (to ensure that unsafe Uninstall tasks
7616                 # are properly identified and blocked from execution).
7617                 if have_uninstall_task and \
7618                         not complete and \
7619                         not unsolvable_blockers:
7620                         self.myparams.add("complete")
7621                         raise self._serialize_tasks_retry("")
7622
7623                 if unsolvable_blockers and \
7624                         not self._accept_blocker_conflicts():
7625                         self._unsatisfied_blockers_for_display = unsolvable_blockers
7626                         self._serialized_tasks_cache = retlist[:]
7627                         self._scheduler_graph = scheduler_graph
7628                         raise self._unknown_internal_error()
7629
7630                 if self._slot_collision_info and \
7631                         not self._accept_blocker_conflicts():
7632                         self._serialized_tasks_cache = retlist[:]
7633                         self._scheduler_graph = scheduler_graph
7634                         raise self._unknown_internal_error()
7635
7636                 return retlist, scheduler_graph
7637
7638         def _show_circular_deps(self, mygraph):
7639                 # No leaf nodes are available, so we have a circular
7640                 # dependency panic situation.  Reduce the noise level to a
7641                 # minimum via repeated elimination of root nodes since they
7642                 # have no parents and thus can not be part of a cycle.
7643                 while True:
7644                         root_nodes = mygraph.root_nodes(
7645                                 ignore_priority=DepPrioritySatisfiedRange.ignore_medium_soft)
7646                         if not root_nodes:
7647                                 break
7648                         mygraph.difference_update(root_nodes)
7649                 # Display the USE flags that are enabled on nodes that are part
7650                 # of dependency cycles in case that helps the user decide to
7651                 # disable some of them.
7652                 display_order = []
7653                 tempgraph = mygraph.copy()
7654                 while not tempgraph.empty():
7655                         nodes = tempgraph.leaf_nodes()
7656                         if not nodes:
7657                                 node = tempgraph.order[0]
7658                         else:
7659                                 node = nodes[0]
7660                         display_order.append(node)
7661                         tempgraph.remove(node)
7662                 display_order.reverse()
7663                 self.myopts.pop("--quiet", None)
7664                 self.myopts.pop("--verbose", None)
7665                 self.myopts["--tree"] = True
7666                 portage.writemsg("\n\n", noiselevel=-1)
7667                 self.display(display_order)
7668                 prefix = colorize("BAD", " * ")
7669                 portage.writemsg("\n", noiselevel=-1)
7670                 portage.writemsg(prefix + "Error: circular dependencies:\n",
7671                         noiselevel=-1)
7672                 portage.writemsg("\n", noiselevel=-1)
7673                 mygraph.debug_print()
7674                 portage.writemsg("\n", noiselevel=-1)
7675                 portage.writemsg(prefix + "Note that circular dependencies " + \
7676                         "can often be avoided by temporarily\n", noiselevel=-1)
7677                 portage.writemsg(prefix + "disabling USE flags that trigger " + \
7678                         "optional dependencies.\n", noiselevel=-1)
7679
7680         def _show_merge_list(self):
7681                 if self._serialized_tasks_cache is not None and \
7682                         not (self._displayed_list and \
7683                         (self._displayed_list == self._serialized_tasks_cache or \
7684                         self._displayed_list == \
7685                                 list(reversed(self._serialized_tasks_cache)))):
7686                         display_list = self._serialized_tasks_cache[:]
7687                         if "--tree" in self.myopts:
7688                                 display_list.reverse()
7689                         self.display(display_list)
7690
7691         def _show_unsatisfied_blockers(self, blockers):
7692                 self._show_merge_list()
7693                 msg = "Error: The above package list contains " + \
7694                         "packages which cannot be installed " + \
7695                         "at the same time on the same system."
7696                 prefix = colorize("BAD", " * ")
7697                 from textwrap import wrap
7698                 portage.writemsg("\n", noiselevel=-1)
7699                 for line in wrap(msg, 70):
7700                         portage.writemsg(prefix + line + "\n", noiselevel=-1)
7701
7702                 # Display the conflicting packages along with the packages
7703                 # that pulled them in. This is helpful for troubleshooting
7704                 # cases in which blockers don't solve automatically and
7705                 # the reasons are not apparent from the normal merge list
7706                 # display.
7707
7708                 conflict_pkgs = {}
7709                 for blocker in blockers:
7710                         for pkg in chain(self._blocked_pkgs.child_nodes(blocker), \
7711                                 self._blocker_parents.parent_nodes(blocker)):
7712                                 parent_atoms = self._parent_atoms.get(pkg)
7713                                 if not parent_atoms:
7714                                         atom = self._blocked_world_pkgs.get(pkg)
7715                                         if atom is not None:
7716                                                 parent_atoms = set([("@world", atom)])
7717                                 if parent_atoms:
7718                                         conflict_pkgs[pkg] = parent_atoms
7719
7720                 if conflict_pkgs:
7721                         # Reduce noise by pruning packages that are only
7722                         # pulled in by other conflict packages.
7723                         pruned_pkgs = set()
7724                         for pkg, parent_atoms in conflict_pkgs.iteritems():
7725                                 relevant_parent = False
7726                                 for parent, atom in parent_atoms:
7727                                         if parent not in conflict_pkgs:
7728                                                 relevant_parent = True
7729                                                 break
7730                                 if not relevant_parent:
7731                                         pruned_pkgs.add(pkg)
7732                         for pkg in pruned_pkgs:
7733                                 del conflict_pkgs[pkg]
7734
7735                 if conflict_pkgs:
7736                         msg = []
7737                         msg.append("\n")
7738                         indent = "  "
7739                         # Max number of parents shown, to avoid flooding the display.
7740                         max_parents = 3
7741                         for pkg, parent_atoms in conflict_pkgs.iteritems():
7742
7743                                 pruned_list = set()
7744
7745                                 # Prefer packages that are not directly involved in a conflict.
7746                                 for parent_atom in parent_atoms:
7747                                         if len(pruned_list) >= max_parents:
7748                                                 break
7749                                         parent, atom = parent_atom
7750                                         if parent not in conflict_pkgs:
7751                                                 pruned_list.add(parent_atom)
7752
7753                                 for parent_atom in parent_atoms:
7754                                         if len(pruned_list) >= max_parents:
7755                                                 break
7756                                         pruned_list.add(parent_atom)
7757
7758                                 omitted_parents = len(parent_atoms) - len(pruned_list)
7759                                 msg.append(indent + "%s pulled in by\n" % pkg)
7760
7761                                 for parent_atom in pruned_list:
7762                                         parent, atom = parent_atom
7763                                         msg.append(2*indent)
7764                                         if isinstance(parent,
7765                                                 (PackageArg, AtomArg)):
7766                                                 # For PackageArg and AtomArg types, it's
7767                                                 # redundant to display the atom attribute.
7768                                                 msg.append(str(parent))
7769                                         else:
7770                                                 # Display the specific atom from SetArg or
7771                                                 # Package types.
7772                                                 msg.append("%s required by %s" % (atom, parent))
7773                                         msg.append("\n")
7774
7775                                 if omitted_parents:
7776                                         msg.append(2*indent)
7777                                         msg.append("(and %d more)\n" % omitted_parents)
7778
7779                                 msg.append("\n")
7780
7781                         sys.stderr.write("".join(msg))
7782                         sys.stderr.flush()
7783
7784                 if "--quiet" not in self.myopts:
7785                         show_blocker_docs_link()
7786
7787         def display(self, mylist, favorites=[], verbosity=None):
7788
7789                 # This is used to prevent display_problems() from
7790                 # redundantly displaying this exact same merge list
7791                 # again via _show_merge_list().
7792                 self._displayed_list = mylist
7793
7794                 if verbosity is None:
7795                         verbosity = ("--quiet" in self.myopts and 1 or \
7796                                 "--verbose" in self.myopts and 3 or 2)
7797                 favorites_set = InternalPackageSet(favorites)
7798                 oneshot = "--oneshot" in self.myopts or \
7799                         "--onlydeps" in self.myopts
7800                 columns = "--columns" in self.myopts
7801                 changelogs=[]
7802                 p=[]
7803                 blockers = []
7804
7805                 counters = PackageCounters()
7806
7807                 if verbosity == 1 and "--verbose" not in self.myopts:
7808                         def create_use_string(*args):
7809                                 return ""
7810                 else:
7811                         def create_use_string(name, cur_iuse, iuse_forced, cur_use,
7812                                 old_iuse, old_use,
7813                                 is_new, reinst_flags,
7814                                 all_flags=(verbosity == 3 or "--quiet" in self.myopts),
7815                                 alphabetical=("--alphabetical" in self.myopts)):
7816                                 enabled = []
7817                                 if alphabetical:
7818                                         disabled = enabled
7819                                         removed = enabled
7820                                 else:
7821                                         disabled = []
7822                                         removed = []
7823                                 cur_iuse = set(cur_iuse)
7824                                 enabled_flags = cur_iuse.intersection(cur_use)
7825                                 removed_iuse = set(old_iuse).difference(cur_iuse)
7826                                 any_iuse = cur_iuse.union(old_iuse)
7827                                 any_iuse = list(any_iuse)
7828                                 any_iuse.sort()
7829                                 for flag in any_iuse:
7830                                         flag_str = None
7831                                         isEnabled = False
7832                                         reinst_flag = reinst_flags and flag in reinst_flags
7833                                         if flag in enabled_flags:
7834                                                 isEnabled = True
7835                                                 if is_new or flag in old_use and \
7836                                                         (all_flags or reinst_flag):
7837                                                         flag_str = red(flag)
7838                                                 elif flag not in old_iuse:
7839                                                         flag_str = yellow(flag) + "%*"
7840                                                 elif flag not in old_use:
7841                                                         flag_str = green(flag) + "*"
7842                                         elif flag in removed_iuse:
7843                                                 if all_flags or reinst_flag:
7844                                                         flag_str = yellow("-" + flag) + "%"
7845                                                         if flag in old_use:
7846                                                                 flag_str += "*"
7847                                                         flag_str = "(" + flag_str + ")"
7848                                                         removed.append(flag_str)
7849                                                 continue
7850                                         else:
7851                                                 if is_new or flag in old_iuse and \
7852                                                         flag not in old_use and \
7853                                                         (all_flags or reinst_flag):
7854                                                         flag_str = blue("-" + flag)
7855                                                 elif flag not in old_iuse:
7856                                                         flag_str = yellow("-" + flag)
7857                                                         if flag not in iuse_forced:
7858                                                                 flag_str += "%"
7859                                                 elif flag in old_use:
7860                                                         flag_str = green("-" + flag) + "*"
7861                                         if flag_str:
7862                                                 if flag in iuse_forced:
7863                                                         flag_str = "(" + flag_str + ")"
7864                                                 if isEnabled:
7865                                                         enabled.append(flag_str)
7866                                                 else:
7867                                                         disabled.append(flag_str)
7868
7869                                 if alphabetical:
7870                                         ret = " ".join(enabled)
7871                                 else:
7872                                         ret = " ".join(enabled + disabled + removed)
7873                                 if ret:
7874                                         ret = '%s="%s" ' % (name, ret)
7875                                 return ret
7876
7877                 repo_display = RepoDisplay(self.roots)
7878
7879                 tree_nodes = []
7880                 display_list = []
7881                 mygraph = self.digraph.copy()
7882
7883                 # If there are any Uninstall instances, add the corresponding
7884                 # blockers to the digraph (useful for --tree display).
7885
7886                 executed_uninstalls = set(node for node in mylist \
7887                         if isinstance(node, Package) and node.operation == "unmerge")
7888
7889                 for uninstall in self._blocker_uninstalls.leaf_nodes():
7890                         uninstall_parents = \
7891                                 self._blocker_uninstalls.parent_nodes(uninstall)
7892                         if not uninstall_parents:
7893                                 continue
7894
7895                         # Remove the corresponding "nomerge" node and substitute
7896                         # the Uninstall node.
7897                         inst_pkg = self._pkg_cache[
7898                                 ("installed", uninstall.root, uninstall.cpv, "nomerge")]
7899                         try:
7900                                 mygraph.remove(inst_pkg)
7901                         except KeyError:
7902                                 pass
7903
7904                         try:
7905                                 inst_pkg_blockers = self._blocker_parents.child_nodes(inst_pkg)
7906                         except KeyError:
7907                                 inst_pkg_blockers = []
7908
7909                         # Break the Package -> Uninstall edges.
7910                         mygraph.remove(uninstall)
7911
7912                         # Resolution of a package's blockers
7913                         # depend on it's own uninstallation.
7914                         for blocker in inst_pkg_blockers:
7915                                 mygraph.add(uninstall, blocker)
7916
7917                         # Expand Package -> Uninstall edges into
7918                         # Package -> Blocker -> Uninstall edges.
7919                         for blocker in uninstall_parents:
7920                                 mygraph.add(uninstall, blocker)
7921                                 for parent in self._blocker_parents.parent_nodes(blocker):
7922                                         if parent != inst_pkg:
7923                                                 mygraph.add(blocker, parent)
7924
7925                         # If the uninstall task did not need to be executed because
7926                         # of an upgrade, display Blocker -> Upgrade edges since the
7927                         # corresponding Blocker -> Uninstall edges will not be shown.
7928                         upgrade_node = \
7929                                 self._slot_pkg_map[uninstall.root].get(uninstall.slot_atom)
7930                         if upgrade_node is not None and \
7931                                 uninstall not in executed_uninstalls:
7932                                 for blocker in uninstall_parents:
7933                                         mygraph.add(upgrade_node, blocker)
7934
7935                 unsatisfied_blockers = []
7936                 i = 0
7937                 depth = 0
7938                 shown_edges = set()
7939                 for x in mylist:
7940                         if isinstance(x, Blocker) and not x.satisfied:
7941                                 unsatisfied_blockers.append(x)
7942                                 continue
7943                         graph_key = x
7944                         if "--tree" in self.myopts:
7945                                 depth = len(tree_nodes)
7946                                 while depth and graph_key not in \
7947                                         mygraph.child_nodes(tree_nodes[depth-1]):
7948                                                 depth -= 1
7949                                 if depth:
7950                                         tree_nodes = tree_nodes[:depth]
7951                                         tree_nodes.append(graph_key)
7952                                         display_list.append((x, depth, True))
7953                                         shown_edges.add((graph_key, tree_nodes[depth-1]))
7954                                 else:
7955                                         traversed_nodes = set() # prevent endless circles
7956                                         traversed_nodes.add(graph_key)
7957                                         def add_parents(current_node, ordered):
7958                                                 parent_nodes = None
7959                                                 # Do not traverse to parents if this node is an
7960                                                 # an argument or a direct member of a set that has
7961                                                 # been specified as an argument (system or world).
7962                                                 if current_node not in self._set_nodes:
7963                                                         parent_nodes = mygraph.parent_nodes(current_node)
7964                                                 if parent_nodes:
7965                                                         child_nodes = set(mygraph.child_nodes(current_node))
7966                                                         selected_parent = None
7967                                                         # First, try to avoid a direct cycle.
7968                                                         for node in parent_nodes:
7969                                                                 if not isinstance(node, (Blocker, Package)):
7970                                                                         continue
7971                                                                 if node not in traversed_nodes and \
7972                                                                         node not in child_nodes:
7973                                                                         edge = (current_node, node)
7974                                                                         if edge in shown_edges:
7975                                                                                 continue
7976                                                                         selected_parent = node
7977                                                                         break
7978                                                         if not selected_parent:
7979                                                                 # A direct cycle is unavoidable.
7980                                                                 for node in parent_nodes:
7981                                                                         if not isinstance(node, (Blocker, Package)):
7982                                                                                 continue
7983                                                                         if node not in traversed_nodes:
7984                                                                                 edge = (current_node, node)
7985                                                                                 if edge in shown_edges:
7986                                                                                         continue
7987                                                                                 selected_parent = node
7988                                                                                 break
7989                                                         if selected_parent:
7990                                                                 shown_edges.add((current_node, selected_parent))
7991                                                                 traversed_nodes.add(selected_parent)
7992                                                                 add_parents(selected_parent, False)
7993                                                 display_list.append((current_node,
7994                                                         len(tree_nodes), ordered))
7995                                                 tree_nodes.append(current_node)
7996                                         tree_nodes = []
7997                                         add_parents(graph_key, True)
7998                         else:
7999                                 display_list.append((x, depth, True))
8000                 mylist = display_list
8001                 for x in unsatisfied_blockers:
8002                         mylist.append((x, 0, True))
8003
8004                 last_merge_depth = 0
8005                 for i in xrange(len(mylist)-1,-1,-1):
8006                         graph_key, depth, ordered = mylist[i]
8007                         if not ordered and depth == 0 and i > 0 \
8008                                 and graph_key == mylist[i-1][0] and \
8009                                 mylist[i-1][1] == 0:
8010                                 # An ordered node got a consecutive duplicate when the tree was
8011                                 # being filled in.
8012                                 del mylist[i]
8013                                 continue
8014                         if ordered and graph_key[-1] != "nomerge":
8015                                 last_merge_depth = depth
8016                                 continue
8017                         if depth >= last_merge_depth or \
8018                                 i < len(mylist) - 1 and \
8019                                 depth >= mylist[i+1][1]:
8020                                         del mylist[i]
8021
8022                 from portage import flatten
8023                 from portage.dep import use_reduce, paren_reduce
8024                 # files to fetch list - avoids counting a same file twice
8025                 # in size display (verbose mode)
8026                 myfetchlist=[]
8027
8028                 # Use this set to detect when all the "repoadd" strings are "[0]"
8029                 # and disable the entire repo display in this case.
8030                 repoadd_set = set()
8031
8032                 for mylist_index in xrange(len(mylist)):
8033                         x, depth, ordered = mylist[mylist_index]
8034                         pkg_type = x[0]
8035                         myroot = x[1]
8036                         pkg_key = x[2]
8037                         portdb = self.trees[myroot]["porttree"].dbapi
8038                         bindb  = self.trees[myroot]["bintree"].dbapi
8039                         vardb = self.trees[myroot]["vartree"].dbapi
8040                         vartree = self.trees[myroot]["vartree"]
8041                         pkgsettings = self.pkgsettings[myroot]
8042
8043                         fetch=" "
8044                         indent = " " * depth
8045
8046                         if isinstance(x, Blocker):
8047                                 if x.satisfied:
8048                                         blocker_style = "PKG_BLOCKER_SATISFIED"
8049                                         addl = "%s  %s  " % (colorize(blocker_style, "b"), fetch)
8050                                 else:
8051                                         blocker_style = "PKG_BLOCKER"
8052                                         addl = "%s  %s  " % (colorize(blocker_style, "B"), fetch)
8053                                 if ordered:
8054                                         counters.blocks += 1
8055                                         if x.satisfied:
8056                                                 counters.blocks_satisfied += 1
8057                                 resolved = portage.key_expand(
8058                                         str(x.atom).lstrip("!"), mydb=vardb, settings=pkgsettings)
8059                                 if "--columns" in self.myopts and "--quiet" in self.myopts:
8060                                         addl += " " + colorize(blocker_style, resolved)
8061                                 else:
8062                                         addl = "[%s %s] %s%s" % \
8063                                                 (colorize(blocker_style, "blocks"),
8064                                                 addl, indent, colorize(blocker_style, resolved))
8065                                 block_parents = self._blocker_parents.parent_nodes(x)
8066                                 block_parents = set([pnode[2] for pnode in block_parents])
8067                                 block_parents = ", ".join(block_parents)
8068                                 if resolved!=x[2]:
8069                                         addl += colorize(blocker_style,
8070                                                 " (\"%s\" is blocking %s)") % \
8071                                                 (str(x.atom).lstrip("!"), block_parents)
8072                                 else:
8073                                         addl += colorize(blocker_style,
8074                                                 " (is blocking %s)") % block_parents
8075                                 if isinstance(x, Blocker) and x.satisfied:
8076                                         if columns:
8077                                                 continue
8078                                         p.append(addl)
8079                                 else:
8080                                         blockers.append(addl)
8081                         else:
8082                                 pkg_status = x[3]
8083                                 pkg_merge = ordered and pkg_status == "merge"
8084                                 if not pkg_merge and pkg_status == "merge":
8085                                         pkg_status = "nomerge"
8086                                 built = pkg_type != "ebuild"
8087                                 installed = pkg_type == "installed"
8088                                 pkg = x
8089                                 metadata = pkg.metadata
8090                                 ebuild_path = None
8091                                 repo_name = metadata["repository"]
8092                                 if pkg_type == "ebuild":
8093                                         ebuild_path = portdb.findname(pkg_key)
8094                                         if not ebuild_path: # shouldn't happen
8095                                                 raise portage.exception.PackageNotFound(pkg_key)
8096                                         repo_path_real = os.path.dirname(os.path.dirname(
8097                                                 os.path.dirname(ebuild_path)))
8098                                 else:
8099                                         repo_path_real = portdb.getRepositoryPath(repo_name)
8100                                 pkg_use = list(pkg.use.enabled)
8101                                 try:
8102                                         restrict = flatten(use_reduce(paren_reduce(
8103                                                 pkg.metadata["RESTRICT"]), uselist=pkg_use))
8104                                 except portage.exception.InvalidDependString, e:
8105                                         if not pkg.installed:
8106                                                 show_invalid_depstring_notice(x,
8107                                                         pkg.metadata["RESTRICT"], str(e))
8108                                                 del e
8109                                                 return 1
8110                                         restrict = []
8111                                 if "ebuild" == pkg_type and x[3] != "nomerge" and \
8112                                         "fetch" in restrict:
8113                                         fetch = red("F")
8114                                         if ordered:
8115                                                 counters.restrict_fetch += 1
8116                                         if portdb.fetch_check(pkg_key, pkg_use):
8117                                                 fetch = green("f")
8118                                                 if ordered:
8119                                                         counters.restrict_fetch_satisfied += 1
8120
8121                                 #we need to use "--emptrytree" testing here rather than "empty" param testing because "empty"
8122                                 #param is used for -u, where you still *do* want to see when something is being upgraded.
8123                                 myoldbest = []
8124                                 myinslotlist = None
8125                                 installed_versions = vardb.match(portage.cpv_getkey(pkg_key))
8126                                 if vardb.cpv_exists(pkg_key):
8127                                         addl="  "+yellow("R")+fetch+"  "
8128                                         if ordered:
8129                                                 if pkg_merge:
8130                                                         counters.reinst += 1
8131                                                 elif pkg_status == "uninstall":
8132                                                         counters.uninst += 1
8133                                 # filter out old-style virtual matches
8134                                 elif installed_versions and \
8135                                         portage.cpv_getkey(installed_versions[0]) == \
8136                                         portage.cpv_getkey(pkg_key):
8137                                         myinslotlist = vardb.match(pkg.slot_atom)
8138                                         # If this is the first install of a new-style virtual, we
8139                                         # need to filter out old-style virtual matches.
8140                                         if myinslotlist and \
8141                                                 portage.cpv_getkey(myinslotlist[0]) != \
8142                                                 portage.cpv_getkey(pkg_key):
8143                                                 myinslotlist = None
8144                                         if myinslotlist:
8145                                                 myoldbest = myinslotlist[:]
8146                                                 addl = "   " + fetch
8147                                                 if not portage.dep.cpvequal(pkg_key,
8148                                                         portage.best([pkg_key] + myoldbest)):
8149                                                         # Downgrade in slot
8150                                                         addl += turquoise("U")+blue("D")
8151                                                         if ordered:
8152                                                                 counters.downgrades += 1
8153                                                 else:
8154                                                         # Update in slot
8155                                                         addl += turquoise("U") + " "
8156                                                         if ordered:
8157                                                                 counters.upgrades += 1
8158                                         else:
8159                                                 # New slot, mark it new.
8160                                                 addl = " " + green("NS") + fetch + "  "
8161                                                 myoldbest = vardb.match(portage.cpv_getkey(pkg_key))
8162                                                 if ordered:
8163                                                         counters.newslot += 1
8164
8165                                         if "--changelog" in self.myopts:
8166                                                 inst_matches = vardb.match(pkg.slot_atom)
8167                                                 if inst_matches:
8168                                                         changelogs.extend(self.calc_changelog(
8169                                                                 portdb.findname(pkg_key),
8170                                                                 inst_matches[0], pkg_key))
8171                                 else:
8172                                         addl = " " + green("N") + " " + fetch + "  "
8173                                         if ordered:
8174                                                 counters.new += 1
8175
8176                                 verboseadd = ""
8177                                 repoadd = None
8178
8179                                 if True:
8180                                         # USE flag display
8181                                         forced_flags = set()
8182                                         pkgsettings.setcpv(pkg) # for package.use.{mask,force}
8183                                         forced_flags.update(pkgsettings.useforce)
8184                                         forced_flags.update(pkgsettings.usemask)
8185
8186                                         cur_use = [flag for flag in pkg.use.enabled \
8187                                                 if flag in pkg.iuse.all]
8188                                         cur_iuse = sorted(pkg.iuse.all)
8189
8190                                         if myoldbest and myinslotlist:
8191                                                 previous_cpv = myoldbest[0]
8192                                         else:
8193                                                 previous_cpv = pkg.cpv
8194                                         if vardb.cpv_exists(previous_cpv):
8195                                                 old_iuse, old_use = vardb.aux_get(
8196                                                                 previous_cpv, ["IUSE", "USE"])
8197                                                 old_iuse = list(set(
8198                                                         filter_iuse_defaults(old_iuse.split())))
8199                                                 old_iuse.sort()
8200                                                 old_use = old_use.split()
8201                                                 is_new = False
8202                                         else:
8203                                                 old_iuse = []
8204                                                 old_use = []
8205                                                 is_new = True
8206
8207                                         old_use = [flag for flag in old_use if flag in old_iuse]
8208
8209                                         use_expand = pkgsettings["USE_EXPAND"].lower().split()
8210                                         use_expand.sort()
8211                                         use_expand.reverse()
8212                                         use_expand_hidden = \
8213                                                 pkgsettings["USE_EXPAND_HIDDEN"].lower().split()
8214
8215                                         def map_to_use_expand(myvals, forcedFlags=False,
8216                                                 removeHidden=True):
8217                                                 ret = {}
8218                                                 forced = {}
8219                                                 for exp in use_expand:
8220                                                         ret[exp] = []
8221                                                         forced[exp] = set()
8222                                                         for val in myvals[:]:
8223                                                                 if val.startswith(exp.lower()+"_"):
8224                                                                         if val in forced_flags:
8225                                                                                 forced[exp].add(val[len(exp)+1:])
8226                                                                         ret[exp].append(val[len(exp)+1:])
8227                                                                         myvals.remove(val)
8228                                                 ret["USE"] = myvals
8229                                                 forced["USE"] = [val for val in myvals \
8230                                                         if val in forced_flags]
8231                                                 if removeHidden:
8232                                                         for exp in use_expand_hidden:
8233                                                                 ret.pop(exp, None)
8234                                                 if forcedFlags:
8235                                                         return ret, forced
8236                                                 return ret
8237
8238                                         # Prevent USE_EXPAND_HIDDEN flags from being hidden if they
8239                                         # are the only thing that triggered reinstallation.
8240                                         reinst_flags_map = {}
8241                                         reinstall_for_flags = self._reinstall_nodes.get(pkg)
8242                                         reinst_expand_map = None
8243                                         if reinstall_for_flags:
8244                                                 reinst_flags_map = map_to_use_expand(
8245                                                         list(reinstall_for_flags), removeHidden=False)
8246                                                 for k in list(reinst_flags_map):
8247                                                         if not reinst_flags_map[k]:
8248                                                                 del reinst_flags_map[k]
8249                                                 if not reinst_flags_map.get("USE"):
8250                                                         reinst_expand_map = reinst_flags_map.copy()
8251                                                         reinst_expand_map.pop("USE", None)
8252                                         if reinst_expand_map and \
8253                                                 not set(reinst_expand_map).difference(
8254                                                 use_expand_hidden):
8255                                                 use_expand_hidden = \
8256                                                         set(use_expand_hidden).difference(
8257                                                         reinst_expand_map)
8258
8259                                         cur_iuse_map, iuse_forced = \
8260                                                 map_to_use_expand(cur_iuse, forcedFlags=True)
8261                                         cur_use_map = map_to_use_expand(cur_use)
8262                                         old_iuse_map = map_to_use_expand(old_iuse)
8263                                         old_use_map = map_to_use_expand(old_use)
8264
8265                                         use_expand.sort()
8266                                         use_expand.insert(0, "USE")
8267                                         
8268                                         for key in use_expand:
8269                                                 if key in use_expand_hidden:
8270                                                         continue
8271                                                 verboseadd += create_use_string(key.upper(),
8272                                                         cur_iuse_map[key], iuse_forced[key],
8273                                                         cur_use_map[key], old_iuse_map[key],
8274                                                         old_use_map[key], is_new,
8275                                                         reinst_flags_map.get(key))
8276
8277                                 if verbosity == 3:
8278                                         # size verbose
8279                                         mysize=0
8280                                         if pkg_type == "ebuild" and pkg_merge:
8281                                                 try:
8282                                                         myfilesdict = portdb.getfetchsizes(pkg_key,
8283                                                                 useflags=pkg_use, debug=self.edebug)
8284                                                 except portage.exception.InvalidDependString, e:
8285                                                         src_uri = portdb.aux_get(pkg_key, ["SRC_URI"])[0]
8286                                                         show_invalid_depstring_notice(x, src_uri, str(e))
8287                                                         del e
8288                                                         return 1
8289                                                 if myfilesdict is None:
8290                                                         myfilesdict="[empty/missing/bad digest]"
8291                                                 else:
8292                                                         for myfetchfile in myfilesdict:
8293                                                                 if myfetchfile not in myfetchlist:
8294                                                                         mysize+=myfilesdict[myfetchfile]
8295                                                                         myfetchlist.append(myfetchfile)
8296                                                         if ordered:
8297                                                                 counters.totalsize += mysize
8298                                                 verboseadd += format_size(mysize)
8299
8300                                         # overlay verbose
8301                                         # assign index for a previous version in the same slot
8302                                         has_previous = False
8303                                         repo_name_prev = None
8304                                         slot_atom = "%s:%s" % (portage.dep_getkey(pkg_key),
8305                                                 metadata["SLOT"])
8306                                         slot_matches = vardb.match(slot_atom)
8307                                         if slot_matches:
8308                                                 has_previous = True
8309                                                 repo_name_prev = vardb.aux_get(slot_matches[0],
8310                                                         ["repository"])[0]
8311
8312                                         # now use the data to generate output
8313                                         if pkg.installed or not has_previous:
8314                                                 repoadd = repo_display.repoStr(repo_path_real)
8315                                         else:
8316                                                 repo_path_prev = None
8317                                                 if repo_name_prev:
8318                                                         repo_path_prev = portdb.getRepositoryPath(
8319                                                                 repo_name_prev)
8320                                                 if repo_path_prev == repo_path_real:
8321                                                         repoadd = repo_display.repoStr(repo_path_real)
8322                                                 else:
8323                                                         repoadd = "%s=>%s" % (
8324                                                                 repo_display.repoStr(repo_path_prev),
8325                                                                 repo_display.repoStr(repo_path_real))
8326                                         if repoadd:
8327                                                 repoadd_set.add(repoadd)
8328
8329                                 xs = [portage.cpv_getkey(pkg_key)] + \
8330                                         list(portage.catpkgsplit(pkg_key)[2:])
8331                                 if xs[2] == "r0":
8332                                         xs[2] = ""
8333                                 else:
8334                                         xs[2] = "-" + xs[2]
8335
8336                                 mywidth = 130
8337                                 if "COLUMNWIDTH" in self.settings:
8338                                         try:
8339                                                 mywidth = int(self.settings["COLUMNWIDTH"])
8340                                         except ValueError, e:
8341                                                 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
8342                                                 portage.writemsg(
8343                                                         "!!! Unable to parse COLUMNWIDTH='%s'\n" % \
8344                                                         self.settings["COLUMNWIDTH"], noiselevel=-1)
8345                                                 del e
8346                                 oldlp = mywidth - 30
8347                                 newlp = oldlp - 30
8348
8349                                 # Convert myoldbest from a list to a string.
8350                                 if not myoldbest:
8351                                         myoldbest = ""
8352                                 else:
8353                                         for pos, key in enumerate(myoldbest):
8354                                                 key = portage.catpkgsplit(key)[2] + \
8355                                                         "-" + portage.catpkgsplit(key)[3]
8356                                                 if key[-3:] == "-r0":
8357                                                         key = key[:-3]
8358                                                 myoldbest[pos] = key
8359                                         myoldbest = blue("["+", ".join(myoldbest)+"]")
8360
8361                                 pkg_cp = xs[0]
8362                                 root_config = self.roots[myroot]
8363                                 system_set = root_config.sets["system"]
8364                                 world_set  = root_config.sets["world"]
8365
8366                                 pkg_system = False
8367                                 pkg_world = False
8368                                 try:
8369                                         pkg_system = system_set.findAtomForPackage(pkg)
8370                                         pkg_world  = world_set.findAtomForPackage(pkg)
8371                                         if not (oneshot or pkg_world) and \
8372                                                 myroot == self.target_root and \
8373                                                 favorites_set.findAtomForPackage(pkg):
8374                                                 # Maybe it will be added to world now.
8375                                                 if create_world_atom(pkg, favorites_set, root_config):
8376                                                         pkg_world = True
8377                                 except portage.exception.InvalidDependString:
8378                                         # This is reported elsewhere if relevant.
8379                                         pass
8380
8381                                 def pkgprint(pkg_str):
8382                                         if pkg_merge:
8383                                                 if pkg_system:
8384                                                         return colorize("PKG_MERGE_SYSTEM", pkg_str)
8385                                                 elif pkg_world:
8386                                                         return colorize("PKG_MERGE_WORLD", pkg_str)
8387                                                 else:
8388                                                         return colorize("PKG_MERGE", pkg_str)
8389                                         elif pkg_status == "uninstall":
8390                                                 return colorize("PKG_UNINSTALL", pkg_str)
8391                                         else:
8392                                                 if pkg_system:
8393                                                         return colorize("PKG_NOMERGE_SYSTEM", pkg_str)
8394                                                 elif pkg_world:
8395                                                         return colorize("PKG_NOMERGE_WORLD", pkg_str)
8396                                                 else:
8397                                                         return colorize("PKG_NOMERGE", pkg_str)
8398
8399                                 try:
8400                                         properties = flatten(use_reduce(paren_reduce(
8401                                                 pkg.metadata["PROPERTIES"]), uselist=pkg.use.enabled))
8402                                 except portage.exception.InvalidDependString, e:
8403                                         if not pkg.installed:
8404                                                 show_invalid_depstring_notice(pkg,
8405                                                         pkg.metadata["PROPERTIES"], str(e))
8406                                                 del e
8407                                                 return 1
8408                                         properties = []
8409                                 interactive = "interactive" in properties
8410                                 if interactive and pkg.operation == "merge":
8411                                         addl = colorize("WARN", "I") + addl[1:]
8412                                         if ordered:
8413                                                 counters.interactive += 1
8414
8415                                 if x[1]!="/":
8416                                         if myoldbest:
8417                                                 myoldbest +=" "
8418                                         if "--columns" in self.myopts:
8419                                                 if "--quiet" in self.myopts:
8420                                                         myprint=addl+" "+indent+pkgprint(pkg_cp)
8421                                                         myprint=myprint+darkblue(" "+xs[1]+xs[2])+" "
8422                                                         myprint=myprint+myoldbest
8423                                                         myprint=myprint+darkgreen("to "+x[1])
8424                                                         verboseadd = None
8425                                                 else:
8426                                                         if not pkg_merge:
8427                                                                 myprint = "[%s] %s%s" % \
8428                                                                         (pkgprint(pkg_status.ljust(13)),
8429                                                                         indent, pkgprint(pkg.cp))
8430                                                         else:
8431                                                                 myprint = "[%s %s] %s%s" % \
8432                                                                         (pkgprint(pkg.type_name), addl,
8433                                                                         indent, pkgprint(pkg.cp))
8434                                                         if (newlp-nc_len(myprint)) > 0:
8435                                                                 myprint=myprint+(" "*(newlp-nc_len(myprint)))
8436                                                         myprint=myprint+"["+darkblue(xs[1]+xs[2])+"] "
8437                                                         if (oldlp-nc_len(myprint)) > 0:
8438                                                                 myprint=myprint+" "*(oldlp-nc_len(myprint))
8439                                                         myprint=myprint+myoldbest
8440                                                         myprint += darkgreen("to " + pkg.root)
8441                                         else:
8442                                                 if not pkg_merge:
8443                                                         myprint = "[%s] " % pkgprint(pkg_status.ljust(13))
8444                                                 else:
8445                                                         myprint = "[%s %s] " % (pkgprint(pkg_type), addl)
8446                                                 myprint += indent + pkgprint(pkg_key) + " " + \
8447                                                         myoldbest + darkgreen("to " + myroot)
8448                                 else:
8449                                         if "--columns" in self.myopts:
8450                                                 if "--quiet" in self.myopts:
8451                                                         myprint=addl+" "+indent+pkgprint(pkg_cp)
8452                                                         myprint=myprint+" "+green(xs[1]+xs[2])+" "
8453                                                         myprint=myprint+myoldbest
8454                                                         verboseadd = None
8455                                                 else:
8456                                                         if not pkg_merge:
8457                                                                 myprint = "[%s] %s%s" % \
8458                                                                         (pkgprint(pkg_status.ljust(13)),
8459                                                                         indent, pkgprint(pkg.cp))
8460                                                         else:
8461                                                                 myprint = "[%s %s] %s%s" % \
8462                                                                         (pkgprint(pkg.type_name), addl,
8463                                                                         indent, pkgprint(pkg.cp))
8464                                                         if (newlp-nc_len(myprint)) > 0:
8465                                                                 myprint=myprint+(" "*(newlp-nc_len(myprint)))
8466                                                         myprint=myprint+green(" ["+xs[1]+xs[2]+"] ")
8467                                                         if (oldlp-nc_len(myprint)) > 0:
8468                                                                 myprint=myprint+(" "*(oldlp-nc_len(myprint)))
8469                                                         myprint += myoldbest
8470                                         else:
8471                                                 if not pkg_merge:
8472                                                         myprint = "[%s] %s%s %s" % \
8473                                                                 (pkgprint(pkg_status.ljust(13)),
8474                                                                 indent, pkgprint(pkg.cpv),
8475                                                                 myoldbest)
8476                                                 else:
8477                                                         myprint = "[%s %s] %s%s %s" % \
8478                                                                 (pkgprint(pkg_type), addl, indent,
8479                                                                 pkgprint(pkg.cpv), myoldbest)
8480
8481                                 if columns and pkg.operation == "uninstall":
8482                                         continue
8483                                 p.append((myprint, verboseadd, repoadd))
8484
8485                                 if "--tree" not in self.myopts and \
8486                                         "--quiet" not in self.myopts and \
8487                                         not self._opts_no_restart.intersection(self.myopts) and \
8488                                         pkg.root == self._running_root.root and \
8489                                         portage.match_from_list(
8490                                         portage.const.PORTAGE_PACKAGE_ATOM, [pkg]) and \
8491                                         not vardb.cpv_exists(pkg.cpv) and \
8492                                         "--quiet" not in self.myopts:
8493                                                 if mylist_index < len(mylist) - 1:
8494                                                         p.append(colorize("WARN", "*** Portage will stop merging at this point and reload itself,"))
8495                                                         p.append(colorize("WARN", "    then resume the merge."))
8496
8497                 out = sys.stdout
8498                 show_repos = repoadd_set and repoadd_set != set(["0"])
8499
8500                 for x in p:
8501                         if isinstance(x, basestring):
8502                                 out.write("%s\n" % (x,))
8503                                 continue
8504
8505                         myprint, verboseadd, repoadd = x
8506
8507                         if verboseadd:
8508                                 myprint += " " + verboseadd
8509
8510                         if show_repos and repoadd:
8511                                 myprint += " " + teal("[%s]" % repoadd)
8512
8513                         out.write("%s\n" % (myprint,))
8514
8515                 for x in blockers:
8516                         print x
8517
8518                 if verbosity == 3:
8519                         print
8520                         print counters
8521                         if show_repos:
8522                                 sys.stdout.write(str(repo_display))
8523
8524                 if "--changelog" in self.myopts:
8525                         print
8526                         for revision,text in changelogs:
8527                                 print bold('*'+revision)
8528                                 sys.stdout.write(text)
8529
8530                 sys.stdout.flush()
8531                 return os.EX_OK
8532
8533         def display_problems(self):
8534                 """
8535                 Display problems with the dependency graph such as slot collisions.
8536                 This is called internally by display() to show the problems _after_
8537                 the merge list where it is most likely to be seen, but if display()
8538                 is not going to be called then this method should be called explicitly
8539                 to ensure that the user is notified of problems with the graph.
8540
8541                 All output goes to stderr, except for unsatisfied dependencies which
8542                 go to stdout for parsing by programs such as autounmask.
8543                 """
8544
8545                 # Note that show_masked_packages() sends it's output to
8546                 # stdout, and some programs such as autounmask parse the
8547                 # output in cases when emerge bails out. However, when
8548                 # show_masked_packages() is called for installed packages
8549                 # here, the message is a warning that is more appropriate
8550                 # to send to stderr, so temporarily redirect stdout to
8551                 # stderr. TODO: Fix output code so there's a cleaner way
8552                 # to redirect everything to stderr.
8553                 sys.stdout.flush()
8554                 sys.stderr.flush()
8555                 stdout = sys.stdout
8556                 try:
8557                         sys.stdout = sys.stderr
8558                         self._display_problems()
8559                 finally:
8560                         sys.stdout = stdout
8561                         sys.stdout.flush()
8562                         sys.stderr.flush()
8563
8564                 # This goes to stdout for parsing by programs like autounmask.
8565                 for pargs, kwargs in self._unsatisfied_deps_for_display:
8566                         self._show_unsatisfied_dep(*pargs, **kwargs)
8567
8568         def _display_problems(self):
8569                 if self._circular_deps_for_display is not None:
8570                         self._show_circular_deps(
8571                                 self._circular_deps_for_display)
8572
8573                 # The user is only notified of a slot conflict if
8574                 # there are no unresolvable blocker conflicts.
8575                 if self._unsatisfied_blockers_for_display is not None:
8576                         self._show_unsatisfied_blockers(
8577                                 self._unsatisfied_blockers_for_display)
8578                 else:
8579                         self._show_slot_collision_notice()
8580
8581                 # TODO: Add generic support for "set problem" handlers so that
8582                 # the below warnings aren't special cases for world only.
8583
8584                 if self._missing_args:
8585                         world_problems = False
8586                         if "world" in self._sets:
8587                                 # Filter out indirect members of world (from nested sets)
8588                                 # since only direct members of world are desired here.
8589                                 world_set = self.roots[self.target_root].sets["world"]
8590                                 for arg, atom in self._missing_args:
8591                                         if arg.name == "world" and atom in world_set:
8592                                                 world_problems = True
8593                                                 break
8594
8595                         if world_problems:
8596                                 sys.stderr.write("\n!!! Problems have been " + \
8597                                         "detected with your world file\n")
8598                                 sys.stderr.write("!!! Please run " + \
8599                                         green("emaint --check world")+"\n\n")
8600
8601                 if self._missing_args:
8602                         sys.stderr.write("\n" + colorize("BAD", "!!!") + \
8603                                 " Ebuilds for the following packages are either all\n")
8604                         sys.stderr.write(colorize("BAD", "!!!") + \
8605                                 " masked or don't exist:\n")
8606                         sys.stderr.write(" ".join(str(atom) for arg, atom in \
8607                                 self._missing_args) + "\n")
8608
8609                 if self._pprovided_args:
8610                         arg_refs = {}
8611                         for arg, atom in self._pprovided_args:
8612                                 if isinstance(arg, SetArg):
8613                                         parent = arg.name
8614                                         arg_atom = (atom, atom)
8615                                 else:
8616                                         parent = "args"
8617                                         arg_atom = (arg.arg, atom)
8618                                 refs = arg_refs.setdefault(arg_atom, [])
8619                                 if parent not in refs:
8620                                         refs.append(parent)
8621                         msg = []
8622                         msg.append(bad("\nWARNING: "))
8623                         if len(self._pprovided_args) > 1:
8624                                 msg.append("Requested packages will not be " + \
8625                                         "merged because they are listed in\n")
8626                         else:
8627                                 msg.append("A requested package will not be " + \
8628                                         "merged because it is listed in\n")
8629                         msg.append("package.provided:\n\n")
8630                         problems_sets = set()
8631                         for (arg, atom), refs in arg_refs.iteritems():
8632                                 ref_string = ""
8633                                 if refs:
8634                                         problems_sets.update(refs)
8635                                         refs.sort()
8636                                         ref_string = ", ".join(["'%s'" % name for name in refs])
8637                                         ref_string = " pulled in by " + ref_string
8638                                 msg.append("  %s%s\n" % (colorize("INFORM", str(arg)), ref_string))
8639                         msg.append("\n")
8640                         if "world" in problems_sets:
8641                                 msg.append("This problem can be solved in one of the following ways:\n\n")
8642                                 msg.append("  A) Use emaint to clean offending packages from world (if not installed).\n")
8643                                 msg.append("  B) Uninstall offending packages (cleans them from world).\n")
8644                                 msg.append("  C) Remove offending entries from package.provided.\n\n")
8645                                 msg.append("The best course of action depends on the reason that an offending\n")
8646                                 msg.append("package.provided entry exists.\n\n")
8647                         sys.stderr.write("".join(msg))
8648
8649                 masked_packages = []
8650                 for pkg in self._masked_installed:
8651                         root_config = pkg.root_config
8652                         pkgsettings = self.pkgsettings[pkg.root]
8653                         mreasons = get_masking_status(pkg, pkgsettings, root_config)
8654                         masked_packages.append((root_config, pkgsettings,
8655                                 pkg.cpv, pkg.metadata, mreasons))
8656                 if masked_packages:
8657                         sys.stderr.write("\n" + colorize("BAD", "!!!") + \
8658                                 " The following installed packages are masked:\n")
8659                         show_masked_packages(masked_packages)
8660                         show_mask_docs()
8661                         print
8662
8663         def calc_changelog(self,ebuildpath,current,next):
8664                 if ebuildpath == None or not os.path.exists(ebuildpath):
8665                         return []
8666                 current = '-'.join(portage.catpkgsplit(current)[1:])
8667                 if current.endswith('-r0'):
8668                         current = current[:-3]
8669                 next = '-'.join(portage.catpkgsplit(next)[1:])
8670                 if next.endswith('-r0'):
8671                         next = next[:-3]
8672                 changelogpath = os.path.join(os.path.split(ebuildpath)[0],'ChangeLog')
8673                 try:
8674                         changelog = open(changelogpath).read()
8675                 except SystemExit, e:
8676                         raise # Needed else can't exit
8677                 except:
8678                         return []
8679                 divisions = self.find_changelog_tags(changelog)
8680                 #print 'XX from',current,'to',next
8681                 #for div,text in divisions: print 'XX',div
8682                 # skip entries for all revisions above the one we are about to emerge
8683                 for i in range(len(divisions)):
8684                         if divisions[i][0]==next:
8685                                 divisions = divisions[i:]
8686                                 break
8687                 # find out how many entries we are going to display
8688                 for i in range(len(divisions)):
8689                         if divisions[i][0]==current:
8690                                 divisions = divisions[:i]
8691                                 break
8692                 else:
8693                     # couldnt find the current revision in the list. display nothing
8694                         return []
8695                 return divisions
8696
8697         def find_changelog_tags(self,changelog):
8698                 divs = []
8699                 release = None
8700                 while 1:
8701                         match = re.search(r'^\*\ ?([-a-zA-Z0-9_.+]*)(?:\ .*)?\n',changelog,re.M)
8702                         if match is None:
8703                                 if release is not None:
8704                                         divs.append((release,changelog))
8705                                 return divs
8706                         if release is not None:
8707                                 divs.append((release,changelog[:match.start()]))
8708                         changelog = changelog[match.end():]
8709                         release = match.group(1)
8710                         if release.endswith('.ebuild'):
8711                                 release = release[:-7]
8712                         if release.endswith('-r0'):
8713                                 release = release[:-3]
8714
8715         def saveNomergeFavorites(self):
8716                 """Find atoms in favorites that are not in the mergelist and add them
8717                 to the world file if necessary."""
8718                 for x in ("--buildpkgonly", "--fetchonly", "--fetch-all-uri",
8719                         "--oneshot", "--onlydeps", "--pretend"):
8720                         if x in self.myopts:
8721                                 return
8722                 root_config = self.roots[self.target_root]
8723                 world_set = root_config.sets["world"]
8724
8725                 world_locked = False
8726                 if hasattr(world_set, "lock"):
8727                         world_set.lock()
8728                         world_locked = True
8729
8730                 if hasattr(world_set, "load"):
8731                         world_set.load() # maybe it's changed on disk
8732
8733                 args_set = self._sets["args"]
8734                 portdb = self.trees[self.target_root]["porttree"].dbapi
8735                 added_favorites = set()
8736                 for x in self._set_nodes:
8737                         pkg_type, root, pkg_key, pkg_status = x
8738                         if pkg_status != "nomerge":
8739                                 continue
8740
8741                         try:
8742                                 myfavkey = create_world_atom(x, args_set, root_config)
8743                                 if myfavkey:
8744                                         if myfavkey in added_favorites:
8745                                                 continue
8746                                         added_favorites.add(myfavkey)
8747                         except portage.exception.InvalidDependString, e:
8748                                 writemsg("\n\n!!! '%s' has invalid PROVIDE: %s\n" % \
8749                                         (pkg_key, str(e)), noiselevel=-1)
8750                                 writemsg("!!! see '%s'\n\n" % os.path.join(
8751                                         root, portage.VDB_PATH, pkg_key, "PROVIDE"), noiselevel=-1)
8752                                 del e
8753                 all_added = []
8754                 for k in self._sets:
8755                         if k in ("args", "world") or not root_config.sets[k].world_candidate:
8756                                 continue
8757                         s = SETPREFIX + k
8758                         if s in world_set:
8759                                 continue
8760                         all_added.append(SETPREFIX + k)
8761                 all_added.extend(added_favorites)
8762                 all_added.sort()
8763                 for a in all_added:
8764                         print ">>> Recording %s in \"world\" favorites file..." % \
8765                                 colorize("INFORM", str(a))
8766                 if all_added:
8767                         world_set.update(all_added)
8768
8769                 if world_locked:
8770                         world_set.unlock()
8771
8772         def loadResumeCommand(self, resume_data, skip_masked=True,
8773                 skip_missing=True):
8774                 """
8775                 Add a resume command to the graph and validate it in the process.  This
8776                 will raise a PackageNotFound exception if a package is not available.
8777                 """
8778
8779                 if not isinstance(resume_data, dict):
8780                         return False
8781
8782                 mergelist = resume_data.get("mergelist")
8783                 if not isinstance(mergelist, list):
8784                         mergelist = []
8785
8786                 fakedb = self.mydbapi
8787                 trees = self.trees
8788                 serialized_tasks = []
8789                 masked_tasks = []
8790                 for x in mergelist:
8791                         if not (isinstance(x, list) and len(x) == 4):
8792                                 continue
8793                         pkg_type, myroot, pkg_key, action = x
8794                         if pkg_type not in self.pkg_tree_map:
8795                                 continue
8796                         if action != "merge":
8797                                 continue
8798                         tree_type = self.pkg_tree_map[pkg_type]
8799                         mydb = trees[myroot][tree_type].dbapi
8800                         db_keys = list(self._trees_orig[myroot][
8801                                 tree_type].dbapi._aux_cache_keys)
8802                         try:
8803                                 metadata = izip(db_keys, mydb.aux_get(pkg_key, db_keys))
8804                         except KeyError:
8805                                 # It does no exist or it is corrupt.
8806                                 if action == "uninstall":
8807                                         continue
8808                                 if skip_missing:
8809                                         # TODO: log these somewhere
8810                                         continue
8811                                 raise portage.exception.PackageNotFound(pkg_key)
8812                         installed = action == "uninstall"
8813                         built = pkg_type != "ebuild"
8814                         root_config = self.roots[myroot]
8815                         pkg = Package(built=built, cpv=pkg_key,
8816                                 installed=installed, metadata=metadata,
8817                                 operation=action, root_config=root_config,
8818                                 type_name=pkg_type)
8819                         if pkg_type == "ebuild":
8820                                 pkgsettings = self.pkgsettings[myroot]
8821                                 pkgsettings.setcpv(pkg)
8822                                 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
8823                                 pkg.metadata['CHOST'] = pkgsettings.get('CHOST', '')
8824                         self._pkg_cache[pkg] = pkg
8825
8826                         root_config = self.roots[pkg.root]
8827                         if "merge" == pkg.operation and \
8828                                 not visible(root_config.settings, pkg):
8829                                 if skip_masked:
8830                                         masked_tasks.append(Dependency(root=pkg.root, parent=pkg))
8831                                 else:
8832                                         self._unsatisfied_deps_for_display.append(
8833                                                 ((pkg.root, "="+pkg.cpv), {"myparent":None}))
8834
8835                         fakedb[myroot].cpv_inject(pkg)
8836                         serialized_tasks.append(pkg)
8837                         self.spinner.update()
8838
8839                 if self._unsatisfied_deps_for_display:
8840                         return False
8841
8842                 if not serialized_tasks or "--nodeps" in self.myopts:
8843                         self._serialized_tasks_cache = serialized_tasks
8844                         self._scheduler_graph = self.digraph
8845                 else:
8846                         self._select_package = self._select_pkg_from_graph
8847                         self.myparams.add("selective")
8848                         # Always traverse deep dependencies in order to account for
8849                         # potentially unsatisfied dependencies of installed packages.
8850                         # This is necessary for correct --keep-going or --resume operation
8851                         # in case a package from a group of circularly dependent packages
8852                         # fails. In this case, a package which has recently been installed
8853                         # may have an unsatisfied circular dependency (pulled in by
8854                         # PDEPEND, for example). So, even though a package is already
8855                         # installed, it may not have all of it's dependencies satisfied, so
8856                         # it may not be usable. If such a package is in the subgraph of
8857                         # deep depenedencies of a scheduled build, that build needs to
8858                         # be cancelled. In order for this type of situation to be
8859                         # recognized, deep traversal of dependencies is required.
8860                         self.myparams.add("deep")
8861
8862                         favorites = resume_data.get("favorites")
8863                         args_set = self._sets["args"]
8864                         if isinstance(favorites, list):
8865                                 args = self._load_favorites(favorites)
8866                         else:
8867                                 args = []
8868
8869                         for task in serialized_tasks:
8870                                 if isinstance(task, Package) and \
8871                                         task.operation == "merge":
8872                                         if not self._add_pkg(task, None):
8873                                                 return False
8874
8875                         # Packages for argument atoms need to be explicitly
8876                         # added via _add_pkg() so that they are included in the
8877                         # digraph (needed at least for --tree display).
8878                         for arg in args:
8879                                 for atom in arg.set:
8880                                         pkg, existing_node = self._select_package(
8881                                                 arg.root_config.root, atom)
8882                                         if existing_node is None and \
8883                                                 pkg is not None:
8884                                                 if not self._add_pkg(pkg, Dependency(atom=atom,
8885                                                         root=pkg.root, parent=arg)):
8886                                                         return False
8887
8888                         # Allow unsatisfied deps here to avoid showing a masking
8889                         # message for an unsatisfied dep that isn't necessarily
8890                         # masked.
8891                         if not self._create_graph(allow_unsatisfied=True):
8892                                 return False
8893
8894                         unsatisfied_deps = []
8895                         for dep in self._unsatisfied_deps:
8896                                 if not isinstance(dep.parent, Package):
8897                                         continue
8898                                 if dep.parent.operation == "merge":
8899                                         unsatisfied_deps.append(dep)
8900                                         continue
8901
8902                                 # For unsatisfied deps of installed packages, only account for
8903                                 # them if they are in the subgraph of dependencies of a package
8904                                 # which is scheduled to be installed.
8905                                 unsatisfied_install = False
8906                                 traversed = set()
8907                                 dep_stack = self.digraph.parent_nodes(dep.parent)
8908                                 while dep_stack:
8909                                         node = dep_stack.pop()
8910                                         if not isinstance(node, Package):
8911                                                 continue
8912                                         if node.operation == "merge":
8913                                                 unsatisfied_install = True
8914                                                 break
8915                                         if node in traversed:
8916                                                 continue
8917                                         traversed.add(node)
8918                                         dep_stack.extend(self.digraph.parent_nodes(node))
8919
8920                                 if unsatisfied_install:
8921                                         unsatisfied_deps.append(dep)
8922
8923                         if masked_tasks or unsatisfied_deps:
8924                                 # This probably means that a required package
8925                                 # was dropped via --skipfirst. It makes the
8926                                 # resume list invalid, so convert it to a
8927                                 # UnsatisfiedResumeDep exception.
8928                                 raise self.UnsatisfiedResumeDep(self,
8929                                         masked_tasks + unsatisfied_deps)
8930                         self._serialized_tasks_cache = None
8931                         try:
8932                                 self.altlist()
8933                         except self._unknown_internal_error:
8934                                 return False
8935
8936                 return True
8937
8938         def _load_favorites(self, favorites):
8939                 """
8940                 Use a list of favorites to resume state from a
8941                 previous select_files() call. This creates similar
8942                 DependencyArg instances to those that would have
8943                 been created by the original select_files() call.
8944                 This allows Package instances to be matched with
8945                 DependencyArg instances during graph creation.
8946                 """
8947                 root_config = self.roots[self.target_root]
8948                 getSetAtoms = root_config.setconfig.getSetAtoms
8949                 sets = root_config.sets
8950                 args = []
8951                 for x in favorites:
8952                         if not isinstance(x, basestring):
8953                                 continue
8954                         if x in ("system", "world"):
8955                                 x = SETPREFIX + x
8956                         if x.startswith(SETPREFIX):
8957                                 s = x[len(SETPREFIX):]
8958                                 if s not in sets:
8959                                         continue
8960                                 if s in self._sets:
8961                                         continue
8962                                 # Recursively expand sets so that containment tests in
8963                                 # self._get_parent_sets() properly match atoms in nested
8964                                 # sets (like if world contains system).
8965                                 expanded_set = InternalPackageSet(
8966                                         initial_atoms=getSetAtoms(s))
8967                                 self._sets[s] = expanded_set
8968                                 args.append(SetArg(arg=x, set=expanded_set,
8969                                         root_config=root_config))
8970                         else:
8971                                 if not portage.isvalidatom(x):
8972                                         continue
8973                                 args.append(AtomArg(arg=x, atom=x,
8974                                         root_config=root_config))
8975
8976                 self._set_args(args)
8977                 return args
8978
8979         class UnsatisfiedResumeDep(portage.exception.PortageException):
8980                 """
8981                 A dependency of a resume list is not installed. This
8982                 can occur when a required package is dropped from the
8983                 merge list via --skipfirst.
8984                 """
8985                 def __init__(self, depgraph, value):
8986                         portage.exception.PortageException.__init__(self, value)
8987                         self.depgraph = depgraph
8988
8989         class _internal_exception(portage.exception.PortageException):
8990                 def __init__(self, value=""):
8991                         portage.exception.PortageException.__init__(self, value)
8992
8993         class _unknown_internal_error(_internal_exception):
8994                 """
8995                 Used by the depgraph internally to terminate graph creation.
8996                 The specific reason for the failure should have been dumped
8997                 to stderr, unfortunately, the exact reason for the failure
8998                 may not be known.
8999                 """
9000
9001         class _serialize_tasks_retry(_internal_exception):
9002                 """
9003                 This is raised by the _serialize_tasks() method when it needs to
9004                 be called again for some reason. The only case that it's currently
9005                 used for is when neglected dependencies need to be added to the
9006                 graph in order to avoid making a potentially unsafe decision.
9007                 """
9008
9009         class _dep_check_composite_db(portage.dbapi):
9010                 """
9011                 A dbapi-like interface that is optimized for use in dep_check() calls.
9012                 This is built on top of the existing depgraph package selection logic.
9013                 Some packages that have been added to the graph may be masked from this
9014                 view in order to influence the atom preference selection that occurs
9015                 via dep_check().
9016                 """
9017                 def __init__(self, depgraph, root):
9018                         portage.dbapi.__init__(self)
9019                         self._depgraph = depgraph
9020                         self._root = root
9021                         self._match_cache = {}
9022                         self._cpv_pkg_map = {}
9023
9024                 def _clear_cache(self):
9025                         self._match_cache.clear()
9026                         self._cpv_pkg_map.clear()
9027
9028                 def match(self, atom):
9029                         ret = self._match_cache.get(atom)
9030                         if ret is not None:
9031                                 return ret[:]
9032                         orig_atom = atom
9033                         if "/" not in atom:
9034                                 atom = self._dep_expand(atom)
9035                         pkg, existing = self._depgraph._select_package(self._root, atom)
9036                         if not pkg:
9037                                 ret = []
9038                         else:
9039                                 # Return the highest available from select_package() as well as
9040                                 # any matching slots in the graph db.
9041                                 slots = set()
9042                                 slots.add(pkg.metadata["SLOT"])
9043                                 atom_cp = portage.dep_getkey(atom)
9044                                 if pkg.cp.startswith("virtual/"):
9045                                         # For new-style virtual lookahead that occurs inside
9046                                         # dep_check(), examine all slots. This is needed
9047                                         # so that newer slots will not unnecessarily be pulled in
9048                                         # when a satisfying lower slot is already installed. For
9049                                         # example, if virtual/jdk-1.4 is satisfied via kaffe then
9050                                         # there's no need to pull in a newer slot to satisfy a
9051                                         # virtual/jdk dependency.
9052                                         for db, pkg_type, built, installed, db_keys in \
9053                                                 self._depgraph._filtered_trees[self._root]["dbs"]:
9054                                                 for cpv in db.match(atom):
9055                                                         if portage.cpv_getkey(cpv) != pkg.cp:
9056                                                                 continue
9057                                                         slots.add(db.aux_get(cpv, ["SLOT"])[0])
9058                                 ret = []
9059                                 if self._visible(pkg):
9060                                         self._cpv_pkg_map[pkg.cpv] = pkg
9061                                         ret.append(pkg.cpv)
9062                                 slots.remove(pkg.metadata["SLOT"])
9063                                 while slots:
9064                                         slot_atom = "%s:%s" % (atom_cp, slots.pop())
9065                                         pkg, existing = self._depgraph._select_package(
9066                                                 self._root, slot_atom)
9067                                         if not pkg:
9068                                                 continue
9069                                         if not self._visible(pkg):
9070                                                 continue
9071                                         self._cpv_pkg_map[pkg.cpv] = pkg
9072                                         ret.append(pkg.cpv)
9073                                 if ret:
9074                                         self._cpv_sort_ascending(ret)
9075                         self._match_cache[orig_atom] = ret
9076                         return ret[:]
9077
9078                 def _visible(self, pkg):
9079                         if pkg.installed and "selective" not in self._depgraph.myparams:
9080                                 try:
9081                                         arg = self._depgraph._iter_atoms_for_pkg(pkg).next()
9082                                 except (StopIteration, portage.exception.InvalidDependString):
9083                                         arg = None
9084                                 if arg:
9085                                         return False
9086                         if pkg.installed:
9087                                 try:
9088                                         if not visible(
9089                                                 self._depgraph.pkgsettings[pkg.root], pkg):
9090                                                 return False
9091                                 except portage.exception.InvalidDependString:
9092                                         pass
9093                         in_graph = self._depgraph._slot_pkg_map[
9094                                 self._root].get(pkg.slot_atom)
9095                         if in_graph is None:
9096                                 # Mask choices for packages which are not the highest visible
9097                                 # version within their slot (since they usually trigger slot
9098                                 # conflicts).
9099                                 highest_visible, in_graph = self._depgraph._select_package(
9100                                         self._root, pkg.slot_atom)
9101                                 if pkg != highest_visible:
9102                                         return False
9103                         elif in_graph != pkg:
9104                                 # Mask choices for packages that would trigger a slot
9105                                 # conflict with a previously selected package.
9106                                 return False
9107                         return True
9108
9109                 def _dep_expand(self, atom):
9110                         """
9111                         This is only needed for old installed packages that may
9112                         contain atoms that are not fully qualified with a specific
9113                         category. Emulate the cpv_expand() function that's used by
9114                         dbapi.match() in cases like this. If there are multiple
9115                         matches, it's often due to a new-style virtual that has
9116                         been added, so try to filter those out to avoid raising
9117                         a ValueError.
9118                         """
9119                         root_config = self._depgraph.roots[self._root]
9120                         orig_atom = atom
9121                         expanded_atoms = self._depgraph._dep_expand(root_config, atom)
9122                         if len(expanded_atoms) > 1:
9123                                 non_virtual_atoms = []
9124                                 for x in expanded_atoms:
9125                                         if not portage.dep_getkey(x).startswith("virtual/"):
9126                                                 non_virtual_atoms.append(x)
9127                                 if len(non_virtual_atoms) == 1:
9128                                         expanded_atoms = non_virtual_atoms
9129                         if len(expanded_atoms) > 1:
9130                                 # compatible with portage.cpv_expand()
9131                                 raise portage.exception.AmbiguousPackageName(
9132                                         [portage.dep_getkey(x) for x in expanded_atoms])
9133                         if expanded_atoms:
9134                                 atom = expanded_atoms[0]
9135                         else:
9136                                 null_atom = insert_category_into_atom(atom, "null")
9137                                 null_cp = portage.dep_getkey(null_atom)
9138                                 cat, atom_pn = portage.catsplit(null_cp)
9139                                 virts_p = root_config.settings.get_virts_p().get(atom_pn)
9140                                 if virts_p:
9141                                         # Allow the resolver to choose which virtual.
9142                                         atom = insert_category_into_atom(atom, "virtual")
9143                                 else:
9144                                         atom = insert_category_into_atom(atom, "null")
9145                         return atom
9146
9147                 def aux_get(self, cpv, wants):
9148                         metadata = self._cpv_pkg_map[cpv].metadata
9149                         return [metadata.get(x, "") for x in wants]
9150
9151 class RepoDisplay(object):
9152         def __init__(self, roots):
9153                 self._shown_repos = {}
9154                 self._unknown_repo = False
9155                 repo_paths = set()
9156                 for root_config in roots.itervalues():
9157                         portdir = root_config.settings.get("PORTDIR")
9158                         if portdir:
9159                                 repo_paths.add(portdir)
9160                         overlays = root_config.settings.get("PORTDIR_OVERLAY")
9161                         if overlays:
9162                                 repo_paths.update(overlays.split())
9163                 repo_paths = list(repo_paths)
9164                 self._repo_paths = repo_paths
9165                 self._repo_paths_real = [ os.path.realpath(repo_path) \
9166                         for repo_path in repo_paths ]
9167
9168                 # pre-allocate index for PORTDIR so that it always has index 0.
9169                 for root_config in roots.itervalues():
9170                         portdb = root_config.trees["porttree"].dbapi
9171                         portdir = portdb.porttree_root
9172                         if portdir:
9173                                 self.repoStr(portdir)
9174
9175         def repoStr(self, repo_path_real):
9176                 real_index = -1
9177                 if repo_path_real:
9178                         real_index = self._repo_paths_real.index(repo_path_real)
9179                 if real_index == -1:
9180                         s = "?"
9181                         self._unknown_repo = True
9182                 else:
9183                         shown_repos = self._shown_repos
9184                         repo_paths = self._repo_paths
9185                         repo_path = repo_paths[real_index]
9186                         index = shown_repos.get(repo_path)
9187                         if index is None:
9188                                 index = len(shown_repos)
9189                                 shown_repos[repo_path] = index
9190                         s = str(index)
9191                 return s
9192
9193         def __str__(self):
9194                 output = []
9195                 shown_repos = self._shown_repos
9196                 unknown_repo = self._unknown_repo
9197                 if shown_repos or self._unknown_repo:
9198                         output.append("Portage tree and overlays:\n")
9199                 show_repo_paths = list(shown_repos)
9200                 for repo_path, repo_index in shown_repos.iteritems():
9201                         show_repo_paths[repo_index] = repo_path
9202                 if show_repo_paths:
9203                         for index, repo_path in enumerate(show_repo_paths):
9204                                 output.append(" "+teal("["+str(index)+"]")+" %s\n" % repo_path)
9205                 if unknown_repo:
9206                         output.append(" "+teal("[?]") + \
9207                                 " indicates that the source repository could not be determined\n")
9208                 return "".join(output)
9209
9210 class PackageCounters(object):
9211
9212         def __init__(self):
9213                 self.upgrades   = 0
9214                 self.downgrades = 0
9215                 self.new        = 0
9216                 self.newslot    = 0
9217                 self.reinst     = 0
9218                 self.uninst     = 0
9219                 self.blocks     = 0
9220                 self.blocks_satisfied         = 0
9221                 self.totalsize  = 0
9222                 self.restrict_fetch           = 0
9223                 self.restrict_fetch_satisfied = 0
9224                 self.interactive              = 0
9225
9226         def __str__(self):
9227                 total_installs = self.upgrades + self.downgrades + self.newslot + self.new + self.reinst
9228                 myoutput = []
9229                 details = []
9230                 myoutput.append("Total: %s package" % total_installs)
9231                 if total_installs != 1:
9232                         myoutput.append("s")
9233                 if total_installs != 0:
9234                         myoutput.append(" (")
9235                 if self.upgrades > 0:
9236                         details.append("%s upgrade" % self.upgrades)
9237                         if self.upgrades > 1:
9238                                 details[-1] += "s"
9239                 if self.downgrades > 0:
9240                         details.append("%s downgrade" % self.downgrades)
9241                         if self.downgrades > 1:
9242                                 details[-1] += "s"
9243                 if self.new > 0:
9244                         details.append("%s new" % self.new)
9245                 if self.newslot > 0:
9246                         details.append("%s in new slot" % self.newslot)
9247                         if self.newslot > 1:
9248                                 details[-1] += "s"
9249                 if self.reinst > 0:
9250                         details.append("%s reinstall" % self.reinst)
9251                         if self.reinst > 1:
9252                                 details[-1] += "s"
9253                 if self.uninst > 0:
9254                         details.append("%s uninstall" % self.uninst)
9255                         if self.uninst > 1:
9256                                 details[-1] += "s"
9257                 if self.interactive > 0:
9258                         details.append("%s %s" % (self.interactive,
9259                                 colorize("WARN", "interactive")))
9260                 myoutput.append(", ".join(details))
9261                 if total_installs != 0:
9262                         myoutput.append(")")
9263                 myoutput.append(", Size of downloads: %s" % format_size(self.totalsize))
9264                 if self.restrict_fetch:
9265                         myoutput.append("\nFetch Restriction: %s package" % \
9266                                 self.restrict_fetch)
9267                         if self.restrict_fetch > 1:
9268                                 myoutput.append("s")
9269                 if self.restrict_fetch_satisfied < self.restrict_fetch:
9270                         myoutput.append(bad(" (%s unsatisfied)") % \
9271                                 (self.restrict_fetch - self.restrict_fetch_satisfied))
9272                 if self.blocks > 0:
9273                         myoutput.append("\nConflict: %s block" % \
9274                                 self.blocks)
9275                         if self.blocks > 1:
9276                                 myoutput.append("s")
9277                         if self.blocks_satisfied < self.blocks:
9278                                 myoutput.append(bad(" (%s unsatisfied)") % \
9279                                         (self.blocks - self.blocks_satisfied))
9280                 return "".join(myoutput)
9281
9282 class UseFlagDisplay(object):
9283
9284         __slots__ = ('name', 'enabled', 'forced')
9285
9286         def __init__(self, name, enabled, forced):
9287                 self.name = name
9288                 self.enabled = enabled
9289                 self.forced = forced
9290
9291         def __str__(self):
9292                 s = self.name
9293                 if self.enabled:
9294                         s = red(s)
9295                 else:
9296                         s = '-' + s
9297                         s = blue(s)
9298                 if self.forced:
9299                         s = '(%s)' % s
9300                 return s
9301
9302         def _cmp_combined(a, b):
9303                 """
9304                 Sort by name, combining enabled and disabled flags.
9305                 """
9306                 return (a.name > b.name) - (a.name < b.name)
9307
9308         sort_combined = cmp_sort_key(_cmp_combined)
9309         del _cmp_combined
9310
9311         def _cmp_separated(a, b):
9312                 """
9313                 Sort by name, separating enabled flags from disabled flags.
9314                 """
9315                 enabled_diff = b.enabled - a.enabled
9316                 if enabled_diff:
9317                         return enabled_diff
9318                 return (a.name > b.name) - (a.name < b.name)
9319
9320         sort_separated = cmp_sort_key(_cmp_separated)
9321         del _cmp_separated
9322
9323 class PollSelectAdapter(PollConstants):
9324
9325         """
9326         Use select to emulate a poll object, for
9327         systems that don't support poll().
9328         """
9329
9330         def __init__(self):
9331                 self._registered = {}
9332                 self._select_args = [[], [], []]
9333
9334         def register(self, fd, *args):
9335                 """
9336                 Only POLLIN is currently supported!
9337                 """
9338                 if len(args) > 1:
9339                         raise TypeError(
9340                                 "register expected at most 2 arguments, got " + \
9341                                 repr(1 + len(args)))
9342
9343                 eventmask = PollConstants.POLLIN | \
9344                         PollConstants.POLLPRI | PollConstants.POLLOUT
9345                 if args:
9346                         eventmask = args[0]
9347
9348                 self._registered[fd] = eventmask
9349                 self._select_args = None
9350
9351         def unregister(self, fd):
9352                 self._select_args = None
9353                 del self._registered[fd]
9354
9355         def poll(self, *args):
9356                 if len(args) > 1:
9357                         raise TypeError(
9358                                 "poll expected at most 2 arguments, got " + \
9359                                 repr(1 + len(args)))
9360
9361                 timeout = None
9362                 if args:
9363                         timeout = args[0]
9364
9365                 select_args = self._select_args
9366                 if select_args is None:
9367                         select_args = [self._registered.keys(), [], []]
9368
9369                 if timeout is not None:
9370                         select_args = select_args[:]
9371                         # Translate poll() timeout args to select() timeout args:
9372                         #
9373                         #          | units        | value(s) for indefinite block
9374                         # ---------|--------------|------------------------------
9375                         #   poll   | milliseconds | omitted, negative, or None
9376                         # ---------|--------------|------------------------------
9377                         #   select | seconds      | omitted
9378                         # ---------|--------------|------------------------------
9379
9380                         if timeout is not None and timeout < 0:
9381                                 timeout = None
9382                         if timeout is not None:
9383                                 select_args.append(timeout / 1000)
9384
9385                 select_events = select.select(*select_args)
9386                 poll_events = []
9387                 for fd in select_events[0]:
9388                         poll_events.append((fd, PollConstants.POLLIN))
9389                 return poll_events
9390
9391 class SequentialTaskQueue(SlotObject):
9392
9393         __slots__ = ("max_jobs", "running_tasks") + \
9394                 ("_dirty", "_scheduling", "_task_queue")
9395
9396         def __init__(self, **kwargs):
9397                 SlotObject.__init__(self, **kwargs)
9398                 self._task_queue = deque()
9399                 self.running_tasks = set()
9400                 if self.max_jobs is None:
9401                         self.max_jobs = 1
9402                 self._dirty = True
9403
9404         def add(self, task):
9405                 self._task_queue.append(task)
9406                 self._dirty = True
9407
9408         def addFront(self, task):
9409                 self._task_queue.appendleft(task)
9410                 self._dirty = True
9411
9412         def schedule(self):
9413
9414                 if not self._dirty:
9415                         return False
9416
9417                 if not self:
9418                         return False
9419
9420                 if self._scheduling:
9421                         # Ignore any recursive schedule() calls triggered via
9422                         # self._task_exit().
9423                         return False
9424
9425                 self._scheduling = True
9426
9427                 task_queue = self._task_queue
9428                 running_tasks = self.running_tasks
9429                 max_jobs = self.max_jobs
9430                 state_changed = False
9431
9432                 while task_queue and \
9433                         (max_jobs is True or len(running_tasks) < max_jobs):
9434                         task = task_queue.popleft()
9435                         cancelled = getattr(task, "cancelled", None)
9436                         if not cancelled:
9437                                 running_tasks.add(task)
9438                                 task.addExitListener(self._task_exit)
9439                                 task.start()
9440                         state_changed = True
9441
9442                 self._dirty = False
9443                 self._scheduling = False
9444
9445                 return state_changed
9446
9447         def _task_exit(self, task):
9448                 """
9449                 Since we can always rely on exit listeners being called, the set of
9450                 running tasks is always pruned automatically and there is never any need
9451                 to actively prune it.
9452                 """
9453                 self.running_tasks.remove(task)
9454                 if self._task_queue:
9455                         self._dirty = True
9456
9457         def clear(self):
9458                 self._task_queue.clear()
9459                 running_tasks = self.running_tasks
9460                 while running_tasks:
9461                         task = running_tasks.pop()
9462                         task.removeExitListener(self._task_exit)
9463                         task.cancel()
9464                 self._dirty = False
9465
9466         def __nonzero__(self):
9467                 return bool(self._task_queue or self.running_tasks)
9468
9469         def __len__(self):
9470                 return len(self._task_queue) + len(self.running_tasks)
9471
9472 _can_poll_device = None
9473
9474 def can_poll_device():
9475         """
9476         Test if it's possible to use poll() on a device such as a pty. This
9477         is known to fail on Darwin.
9478         @rtype: bool
9479         @returns: True if poll() on a device succeeds, False otherwise.
9480         """
9481
9482         global _can_poll_device
9483         if _can_poll_device is not None:
9484                 return _can_poll_device
9485
9486         if not hasattr(select, "poll"):
9487                 _can_poll_device = False
9488                 return _can_poll_device
9489
9490         try:
9491                 dev_null = open('/dev/null', 'rb')
9492         except IOError:
9493                 _can_poll_device = False
9494                 return _can_poll_device
9495
9496         p = select.poll()
9497         p.register(dev_null.fileno(), PollConstants.POLLIN)
9498
9499         invalid_request = False
9500         for f, event in p.poll():
9501                 if event & PollConstants.POLLNVAL:
9502                         invalid_request = True
9503                         break
9504         dev_null.close()
9505
9506         _can_poll_device = not invalid_request
9507         return _can_poll_device
9508
9509 def create_poll_instance():
9510         """
9511         Create an instance of select.poll, or an instance of
9512         PollSelectAdapter there is no poll() implementation or
9513         it is broken somehow.
9514         """
9515         if can_poll_device():
9516                 return select.poll()
9517         return PollSelectAdapter()
9518
9519 getloadavg = getattr(os, "getloadavg", None)
9520 if getloadavg is None:
9521         def getloadavg():
9522                 """
9523                 Uses /proc/loadavg to emulate os.getloadavg().
9524                 Raises OSError if the load average was unobtainable.
9525                 """
9526                 try:
9527                         loadavg_str = open('/proc/loadavg').readline()
9528                 except IOError:
9529                         # getloadavg() is only supposed to raise OSError, so convert
9530                         raise OSError('unknown')
9531                 loadavg_split = loadavg_str.split()
9532                 if len(loadavg_split) < 3:
9533                         raise OSError('unknown')
9534                 loadavg_floats = []
9535                 for i in xrange(3):
9536                         try:
9537                                 loadavg_floats.append(float(loadavg_split[i]))
9538                         except ValueError:
9539                                 raise OSError('unknown')
9540                 return tuple(loadavg_floats)
9541
9542 class PollScheduler(object):
9543
9544         class _sched_iface_class(SlotObject):
9545                 __slots__ = ("register", "schedule", "unregister")
9546
9547         def __init__(self):
9548                 self._max_jobs = 1
9549                 self._max_load = None
9550                 self._jobs = 0
9551                 self._poll_event_queue = []
9552                 self._poll_event_handlers = {}
9553                 self._poll_event_handler_ids = {}
9554                 # Increment id for each new handler.
9555                 self._event_handler_id = 0
9556                 self._poll_obj = create_poll_instance()
9557                 self._scheduling = False
9558
9559         def _schedule(self):
9560                 """
9561                 Calls _schedule_tasks() and automatically returns early from
9562                 any recursive calls to this method that the _schedule_tasks()
9563                 call might trigger. This makes _schedule() safe to call from
9564                 inside exit listeners.
9565                 """
9566                 if self._scheduling:
9567                         return False
9568                 self._scheduling = True
9569                 try:
9570                         return self._schedule_tasks()
9571                 finally:
9572                         self._scheduling = False
9573
9574         def _running_job_count(self):
9575                 return self._jobs
9576
9577         def _can_add_job(self):
9578                 max_jobs = self._max_jobs
9579                 max_load = self._max_load
9580
9581                 if self._max_jobs is not True and \
9582                         self._running_job_count() >= self._max_jobs:
9583                         return False
9584
9585                 if max_load is not None and \
9586                         (max_jobs is True or max_jobs > 1) and \
9587                         self._running_job_count() >= 1:
9588                         try:
9589                                 avg1, avg5, avg15 = getloadavg()
9590                         except OSError:
9591                                 return False
9592
9593                         if avg1 >= max_load:
9594                                 return False
9595
9596                 return True
9597
9598         def _poll(self, timeout=None):
9599                 """
9600                 All poll() calls pass through here. The poll events
9601                 are added directly to self._poll_event_queue.
9602                 In order to avoid endless blocking, this raises
9603                 StopIteration if timeout is None and there are
9604                 no file descriptors to poll.
9605                 """
9606                 if not self._poll_event_handlers:
9607                         self._schedule()
9608                         if timeout is None and \
9609                                 not self._poll_event_handlers:
9610                                 raise StopIteration(
9611                                         "timeout is None and there are no poll() event handlers")
9612
9613                 # The following error is known to occur with Linux kernel versions
9614                 # less than 2.6.24:
9615                 #
9616                 #   select.error: (4, 'Interrupted system call')
9617                 #
9618                 # This error has been observed after a SIGSTOP, followed by SIGCONT.
9619                 # Treat it similar to EAGAIN if timeout is None, otherwise just return
9620                 # without any events.
9621                 while True:
9622                         try:
9623                                 self._poll_event_queue.extend(self._poll_obj.poll(timeout))
9624                                 break
9625                         except select.error, e:
9626                                 writemsg_level("\n!!! select error: %s\n" % (e,),
9627                                         level=logging.ERROR, noiselevel=-1)
9628                                 del e
9629                                 if timeout is not None:
9630                                         break
9631
9632         def _next_poll_event(self, timeout=None):
9633                 """
9634                 Since the _schedule_wait() loop is called by event
9635                 handlers from _poll_loop(), maintain a central event
9636                 queue for both of them to share events from a single
9637                 poll() call. In order to avoid endless blocking, this
9638                 raises StopIteration if timeout is None and there are
9639                 no file descriptors to poll.
9640                 """
9641                 if not self._poll_event_queue:
9642                         self._poll(timeout)
9643                 return self._poll_event_queue.pop()
9644
9645         def _poll_loop(self):
9646
9647                 event_handlers = self._poll_event_handlers
9648                 event_handled = False
9649
9650                 try:
9651                         while event_handlers:
9652                                 f, event = self._next_poll_event()
9653                                 handler, reg_id = event_handlers[f]
9654                                 handler(f, event)
9655                                 event_handled = True
9656                 except StopIteration:
9657                         event_handled = True
9658
9659                 if not event_handled:
9660                         raise AssertionError("tight loop")
9661
9662         def _schedule_yield(self):
9663                 """
9664                 Schedule for a short period of time chosen by the scheduler based
9665                 on internal state. Synchronous tasks should call this periodically
9666                 in order to allow the scheduler to service pending poll events. The
9667                 scheduler will call poll() exactly once, without blocking, and any
9668                 resulting poll events will be serviced.
9669                 """
9670                 event_handlers = self._poll_event_handlers
9671                 events_handled = 0
9672
9673                 if not event_handlers:
9674                         return bool(events_handled)
9675
9676                 if not self._poll_event_queue:
9677                         self._poll(0)
9678
9679                 try:
9680                         while event_handlers and self._poll_event_queue:
9681                                 f, event = self._next_poll_event()
9682                                 handler, reg_id = event_handlers[f]
9683                                 handler(f, event)
9684                                 events_handled += 1
9685                 except StopIteration:
9686                         events_handled += 1
9687
9688                 return bool(events_handled)
9689
9690         def _register(self, f, eventmask, handler):
9691                 """
9692                 @rtype: Integer
9693                 @return: A unique registration id, for use in schedule() or
9694                         unregister() calls.
9695                 """
9696                 if f in self._poll_event_handlers:
9697                         raise AssertionError("fd %d is already registered" % f)
9698                 self._event_handler_id += 1
9699                 reg_id = self._event_handler_id
9700                 self._poll_event_handler_ids[reg_id] = f
9701                 self._poll_event_handlers[f] = (handler, reg_id)
9702                 self._poll_obj.register(f, eventmask)
9703                 return reg_id
9704
9705         def _unregister(self, reg_id):
9706                 f = self._poll_event_handler_ids[reg_id]
9707                 self._poll_obj.unregister(f)
9708                 del self._poll_event_handlers[f]
9709                 del self._poll_event_handler_ids[reg_id]
9710
9711         def _schedule_wait(self, wait_ids):
9712                 """
9713                 Schedule until wait_id is not longer registered
9714                 for poll() events.
9715                 @type wait_id: int
9716                 @param wait_id: a task id to wait for
9717                 """
9718                 event_handlers = self._poll_event_handlers
9719                 handler_ids = self._poll_event_handler_ids
9720                 event_handled = False
9721
9722                 if isinstance(wait_ids, int):
9723                         wait_ids = frozenset([wait_ids])
9724
9725                 try:
9726                         while wait_ids.intersection(handler_ids):
9727                                 f, event = self._next_poll_event()
9728                                 handler, reg_id = event_handlers[f]
9729                                 handler(f, event)
9730                                 event_handled = True
9731                 except StopIteration:
9732                         event_handled = True
9733
9734                 return event_handled
9735
9736 class QueueScheduler(PollScheduler):
9737
9738         """
9739         Add instances of SequentialTaskQueue and then call run(). The
9740         run() method returns when no tasks remain.
9741         """
9742
9743         def __init__(self, max_jobs=None, max_load=None):
9744                 PollScheduler.__init__(self)
9745
9746                 if max_jobs is None:
9747                         max_jobs = 1
9748
9749                 self._max_jobs = max_jobs
9750                 self._max_load = max_load
9751                 self.sched_iface = self._sched_iface_class(
9752                         register=self._register,
9753                         schedule=self._schedule_wait,
9754                         unregister=self._unregister)
9755
9756                 self._queues = []
9757                 self._schedule_listeners = []
9758
9759         def add(self, q):
9760                 self._queues.append(q)
9761
9762         def remove(self, q):
9763                 self._queues.remove(q)
9764
9765         def run(self):
9766
9767                 while self._schedule():
9768                         self._poll_loop()
9769
9770                 while self._running_job_count():
9771                         self._poll_loop()
9772
9773         def _schedule_tasks(self):
9774                 """
9775                 @rtype: bool
9776                 @returns: True if there may be remaining tasks to schedule,
9777                         False otherwise.
9778                 """
9779                 while self._can_add_job():
9780                         n = self._max_jobs - self._running_job_count()
9781                         if n < 1:
9782                                 break
9783
9784                         if not self._start_next_job(n):
9785                                 return False
9786
9787                 for q in self._queues:
9788                         if q:
9789                                 return True
9790                 return False
9791
9792         def _running_job_count(self):
9793                 job_count = 0
9794                 for q in self._queues:
9795                         job_count += len(q.running_tasks)
9796                 self._jobs = job_count
9797                 return job_count
9798
9799         def _start_next_job(self, n=1):
9800                 started_count = 0
9801                 for q in self._queues:
9802                         initial_job_count = len(q.running_tasks)
9803                         q.schedule()
9804                         final_job_count = len(q.running_tasks)
9805                         if final_job_count > initial_job_count:
9806                                 started_count += (final_job_count - initial_job_count)
9807                         if started_count >= n:
9808                                 break
9809                 return started_count
9810
9811 class TaskScheduler(object):
9812
9813         """
9814         A simple way to handle scheduling of AsynchrousTask instances. Simply
9815         add tasks and call run(). The run() method returns when no tasks remain.
9816         """
9817
9818         def __init__(self, max_jobs=None, max_load=None):
9819                 self._queue = SequentialTaskQueue(max_jobs=max_jobs)
9820                 self._scheduler = QueueScheduler(
9821                         max_jobs=max_jobs, max_load=max_load)
9822                 self.sched_iface = self._scheduler.sched_iface
9823                 self.run = self._scheduler.run
9824                 self._scheduler.add(self._queue)
9825
9826         def add(self, task):
9827                 self._queue.add(task)
9828
9829 class JobStatusDisplay(object):
9830
9831         _bound_properties = ("curval", "failed", "running")
9832         _jobs_column_width = 48
9833
9834         # Don't update the display unless at least this much
9835         # time has passed, in units of seconds.
9836         _min_display_latency = 2
9837
9838         _default_term_codes = {
9839                 'cr'  : '\r',
9840                 'el'  : '\x1b[K',
9841                 'nel' : '\n',
9842         }
9843
9844         _termcap_name_map = {
9845                 'carriage_return' : 'cr',
9846                 'clr_eol'         : 'el',
9847                 'newline'         : 'nel',
9848         }
9849
9850         def __init__(self, out=sys.stdout, quiet=False):
9851                 object.__setattr__(self, "out", out)
9852                 object.__setattr__(self, "quiet", quiet)
9853                 object.__setattr__(self, "maxval", 0)
9854                 object.__setattr__(self, "merges", 0)
9855                 object.__setattr__(self, "_changed", False)
9856                 object.__setattr__(self, "_displayed", False)
9857                 object.__setattr__(self, "_last_display_time", 0)
9858                 object.__setattr__(self, "width", 80)
9859                 self.reset()
9860
9861                 isatty = hasattr(out, "isatty") and out.isatty()
9862                 object.__setattr__(self, "_isatty", isatty)
9863                 if not isatty or not self._init_term():
9864                         term_codes = {}
9865                         for k, capname in self._termcap_name_map.iteritems():
9866                                 term_codes[k] = self._default_term_codes[capname]
9867                         object.__setattr__(self, "_term_codes", term_codes)
9868                 encoding = sys.getdefaultencoding()
9869                 for k, v in self._term_codes.items():
9870                         if not isinstance(v, basestring):
9871                                 self._term_codes[k] = v.decode(encoding, 'replace')
9872
9873         def _init_term(self):
9874                 """
9875                 Initialize term control codes.
9876                 @rtype: bool
9877                 @returns: True if term codes were successfully initialized,
9878                         False otherwise.
9879                 """
9880
9881                 term_type = os.environ.get("TERM", "vt100")
9882                 tigetstr = None
9883
9884                 try:
9885                         import curses
9886                         try:
9887                                 curses.setupterm(term_type, self.out.fileno())
9888                                 tigetstr = curses.tigetstr
9889                         except curses.error:
9890                                 pass
9891                 except ImportError:
9892                         pass
9893
9894                 if tigetstr is None:
9895                         return False
9896
9897                 term_codes = {}
9898                 for k, capname in self._termcap_name_map.iteritems():
9899                         code = tigetstr(capname)
9900                         if code is None:
9901                                 code = self._default_term_codes[capname]
9902                         term_codes[k] = code
9903                 object.__setattr__(self, "_term_codes", term_codes)
9904                 return True
9905
9906         def _format_msg(self, msg):
9907                 return ">>> %s" % msg
9908
9909         def _erase(self):
9910                 self.out.write(
9911                         self._term_codes['carriage_return'] + \
9912                         self._term_codes['clr_eol'])
9913                 self.out.flush()
9914                 self._displayed = False
9915
9916         def _display(self, line):
9917                 self.out.write(line)
9918                 self.out.flush()
9919                 self._displayed = True
9920
9921         def _update(self, msg):
9922
9923                 out = self.out
9924                 if not self._isatty:
9925                         out.write(self._format_msg(msg) + self._term_codes['newline'])
9926                         self.out.flush()
9927                         self._displayed = True
9928                         return
9929
9930                 if self._displayed:
9931                         self._erase()
9932
9933                 self._display(self._format_msg(msg))
9934
9935         def displayMessage(self, msg):
9936
9937                 was_displayed = self._displayed
9938
9939                 if self._isatty and self._displayed:
9940                         self._erase()
9941
9942                 self.out.write(self._format_msg(msg) + self._term_codes['newline'])
9943                 self.out.flush()
9944                 self._displayed = False
9945
9946                 if was_displayed:
9947                         self._changed = True
9948                         self.display()
9949
9950         def reset(self):
9951                 self.maxval = 0
9952                 self.merges = 0
9953                 for name in self._bound_properties:
9954                         object.__setattr__(self, name, 0)
9955
9956                 if self._displayed:
9957                         self.out.write(self._term_codes['newline'])
9958                         self.out.flush()
9959                         self._displayed = False
9960
9961         def __setattr__(self, name, value):
9962                 old_value = getattr(self, name)
9963                 if value == old_value:
9964                         return
9965                 object.__setattr__(self, name, value)
9966                 if name in self._bound_properties:
9967                         self._property_change(name, old_value, value)
9968
9969         def _property_change(self, name, old_value, new_value):
9970                 self._changed = True
9971                 self.display()
9972
9973         def _load_avg_str(self):
9974                 try:
9975                         avg = getloadavg()
9976                 except OSError:
9977                         return 'unknown'
9978
9979                 max_avg = max(avg)
9980
9981                 if max_avg < 10:
9982                         digits = 2
9983                 elif max_avg < 100:
9984                         digits = 1
9985                 else:
9986                         digits = 0
9987
9988                 return ", ".join(("%%.%df" % digits ) % x for x in avg)
9989
9990         def display(self):
9991                 """
9992                 Display status on stdout, but only if something has
9993                 changed since the last call.
9994                 """
9995
9996                 if self.quiet:
9997                         return
9998
9999                 current_time = time.time()
10000                 time_delta = current_time - self._last_display_time
10001                 if self._displayed and \
10002                         not self._changed:
10003                         if not self._isatty:
10004                                 return
10005                         if time_delta < self._min_display_latency:
10006                                 return
10007
10008                 self._last_display_time = current_time
10009                 self._changed = False
10010                 self._display_status()
10011
10012         def _display_status(self):
10013                 # Don't use len(self._completed_tasks) here since that also
10014                 # can include uninstall tasks.
10015                 curval_str = str(self.curval)
10016                 maxval_str = str(self.maxval)
10017                 running_str = str(self.running)
10018                 failed_str = str(self.failed)
10019                 load_avg_str = self._load_avg_str()
10020
10021                 color_output = StringIO()
10022                 plain_output = StringIO()
10023                 style_file = portage.output.ConsoleStyleFile(color_output)
10024                 style_file.write_listener = plain_output
10025                 style_writer = portage.output.StyleWriter(file=style_file, maxcol=9999)
10026                 style_writer.style_listener = style_file.new_styles
10027                 f = formatter.AbstractFormatter(style_writer)
10028
10029                 number_style = "INFORM"
10030                 f.add_literal_data("Jobs: ")
10031                 f.push_style(number_style)
10032                 f.add_literal_data(curval_str)
10033                 f.pop_style()
10034                 f.add_literal_data(" of ")
10035                 f.push_style(number_style)
10036                 f.add_literal_data(maxval_str)
10037                 f.pop_style()
10038                 f.add_literal_data(" complete")
10039
10040                 if self.running:
10041                         f.add_literal_data(", ")
10042                         f.push_style(number_style)
10043                         f.add_literal_data(running_str)
10044                         f.pop_style()
10045                         f.add_literal_data(" running")
10046
10047                 if self.failed:
10048                         f.add_literal_data(", ")
10049                         f.push_style(number_style)
10050                         f.add_literal_data(failed_str)
10051                         f.pop_style()
10052                         f.add_literal_data(" failed")
10053
10054                 padding = self._jobs_column_width - len(plain_output.getvalue())
10055                 if padding > 0:
10056                         f.add_literal_data(padding * " ")
10057
10058                 f.add_literal_data("Load avg: ")
10059                 f.add_literal_data(load_avg_str)
10060
10061                 # Truncate to fit width, to avoid making the terminal scroll if the
10062                 # line overflows (happens when the load average is large).
10063                 plain_output = plain_output.getvalue()
10064                 if self._isatty and len(plain_output) > self.width:
10065                         # Use plain_output here since it's easier to truncate
10066                         # properly than the color output which contains console
10067                         # color codes.
10068                         self._update(plain_output[:self.width])
10069                 else:
10070                         self._update(color_output.getvalue())
10071
10072                 xtermTitle(" ".join(plain_output.split()))
10073
10074 class ProgressHandler(object):
10075         def __init__(self):
10076                 self.curval = 0
10077                 self.maxval = 0
10078                 self._last_update = 0
10079                 self.min_latency = 0.2
10080
10081         def onProgress(self, maxval, curval):
10082                 self.maxval = maxval
10083                 self.curval = curval
10084                 cur_time = time.time()
10085                 if cur_time - self._last_update >= self.min_latency:
10086                         self._last_update = cur_time
10087                         self.display()
10088
10089         def display(self):
10090                 raise NotImplementedError(self)
10091
10092 class Scheduler(PollScheduler):
10093
10094         _opts_ignore_blockers = \
10095                 frozenset(["--buildpkgonly",
10096                 "--fetchonly", "--fetch-all-uri",
10097                 "--nodeps", "--pretend"])
10098
10099         _opts_no_background = \
10100                 frozenset(["--pretend",
10101                 "--fetchonly", "--fetch-all-uri"])
10102
10103         _opts_no_restart = frozenset(["--buildpkgonly",
10104                 "--fetchonly", "--fetch-all-uri", "--pretend"])
10105
10106         _bad_resume_opts = set(["--ask", "--changelog",
10107                 "--resume", "--skipfirst"])
10108
10109         _fetch_log = "/var/log/emerge-fetch.log"
10110
10111         class _iface_class(SlotObject):
10112                 __slots__ = ("dblinkEbuildPhase", "dblinkDisplayMerge",
10113                         "dblinkElog", "dblinkEmergeLog", "fetch", "register", "schedule",
10114                         "scheduleSetup", "scheduleUnpack", "scheduleYield",
10115                         "unregister")
10116
10117         class _fetch_iface_class(SlotObject):
10118                 __slots__ = ("log_file", "schedule")
10119
10120         _task_queues_class = slot_dict_class(
10121                 ("merge", "jobs", "fetch", "unpack"), prefix="")
10122
10123         class _build_opts_class(SlotObject):
10124                 __slots__ = ("buildpkg", "buildpkgonly",
10125                         "fetch_all_uri", "fetchonly", "pretend")
10126
10127         class _binpkg_opts_class(SlotObject):
10128                 __slots__ = ("fetchonly", "getbinpkg", "pretend")
10129
10130         class _pkg_count_class(SlotObject):
10131                 __slots__ = ("curval", "maxval")
10132
10133         class _emerge_log_class(SlotObject):
10134                 __slots__ = ("xterm_titles",)
10135
10136                 def log(self, *pargs, **kwargs):
10137                         if not self.xterm_titles:
10138                                 # Avoid interference with the scheduler's status display.
10139                                 kwargs.pop("short_msg", None)
10140                         emergelog(self.xterm_titles, *pargs, **kwargs)
10141
10142         class _failed_pkg(SlotObject):
10143                 __slots__ = ("build_dir", "build_log", "pkg", "returncode")
10144
10145         class _ConfigPool(object):
10146                 """Interface for a task to temporarily allocate a config
10147                 instance from a pool. This allows a task to be constructed
10148                 long before the config instance actually becomes needed, like
10149                 when prefetchers are constructed for the whole merge list."""
10150                 __slots__ = ("_root", "_allocate", "_deallocate")
10151                 def __init__(self, root, allocate, deallocate):
10152                         self._root = root
10153                         self._allocate = allocate
10154                         self._deallocate = deallocate
10155                 def allocate(self):
10156                         return self._allocate(self._root)
10157                 def deallocate(self, settings):
10158                         self._deallocate(settings)
10159
10160         class _unknown_internal_error(portage.exception.PortageException):
10161                 """
10162                 Used internally to terminate scheduling. The specific reason for
10163                 the failure should have been dumped to stderr.
10164                 """
10165                 def __init__(self, value=""):
10166                         portage.exception.PortageException.__init__(self, value)
10167
10168         def __init__(self, settings, trees, mtimedb, myopts,
10169                 spinner, mergelist, favorites, digraph):
10170                 PollScheduler.__init__(self)
10171                 self.settings = settings
10172                 self.target_root = settings["ROOT"]
10173                 self.trees = trees
10174                 self.myopts = myopts
10175                 self._spinner = spinner
10176                 self._mtimedb = mtimedb
10177                 self._mergelist = mergelist
10178                 self._favorites = favorites
10179                 self._args_set = InternalPackageSet(favorites)
10180                 self._build_opts = self._build_opts_class()
10181                 for k in self._build_opts.__slots__:
10182                         setattr(self._build_opts, k, "--" + k.replace("_", "-") in myopts)
10183                 self._binpkg_opts = self._binpkg_opts_class()
10184                 for k in self._binpkg_opts.__slots__:
10185                         setattr(self._binpkg_opts, k, "--" + k.replace("_", "-") in myopts)
10186
10187                 self.curval = 0
10188                 self._logger = self._emerge_log_class()
10189                 self._task_queues = self._task_queues_class()
10190                 for k in self._task_queues.allowed_keys:
10191                         setattr(self._task_queues, k,
10192                                 SequentialTaskQueue())
10193
10194                 # Holds merges that will wait to be executed when no builds are
10195                 # executing. This is useful for system packages since dependencies
10196                 # on system packages are frequently unspecified.
10197                 self._merge_wait_queue = []
10198                 # Holds merges that have been transfered from the merge_wait_queue to
10199                 # the actual merge queue. They are removed from this list upon
10200                 # completion. Other packages can start building only when this list is
10201                 # empty.
10202                 self._merge_wait_scheduled = []
10203
10204                 # Holds system packages and their deep runtime dependencies. Before
10205                 # being merged, these packages go to merge_wait_queue, to be merged
10206                 # when no other packages are building.
10207                 self._deep_system_deps = set()
10208
10209                 # Holds packages to merge which will satisfy currently unsatisfied
10210                 # deep runtime dependencies of system packages. If this is not empty
10211                 # then no parallel builds will be spawned until it is empty. This
10212                 # minimizes the possibility that a build will fail due to the system
10213                 # being in a fragile state. For example, see bug #259954.
10214                 self._unsatisfied_system_deps = set()
10215
10216                 self._status_display = JobStatusDisplay()
10217                 self._max_load = myopts.get("--load-average")
10218                 max_jobs = myopts.get("--jobs")
10219                 if max_jobs is None:
10220                         max_jobs = 1
10221                 self._set_max_jobs(max_jobs)
10222
10223                 # The root where the currently running
10224                 # portage instance is installed.
10225                 self._running_root = trees["/"]["root_config"]
10226                 self.edebug = 0
10227                 if settings.get("PORTAGE_DEBUG", "") == "1":
10228                         self.edebug = 1
10229                 self.pkgsettings = {}
10230                 self._config_pool = {}
10231                 self._blocker_db = {}
10232                 for root in trees:
10233                         self._config_pool[root] = []
10234                         self._blocker_db[root] = BlockerDB(trees[root]["root_config"])
10235
10236                 fetch_iface = self._fetch_iface_class(log_file=self._fetch_log,
10237                         schedule=self._schedule_fetch)
10238                 self._sched_iface = self._iface_class(
10239                         dblinkEbuildPhase=self._dblink_ebuild_phase,
10240                         dblinkDisplayMerge=self._dblink_display_merge,
10241                         dblinkElog=self._dblink_elog,
10242                         dblinkEmergeLog=self._dblink_emerge_log,
10243                         fetch=fetch_iface, register=self._register,
10244                         schedule=self._schedule_wait,
10245                         scheduleSetup=self._schedule_setup,
10246                         scheduleUnpack=self._schedule_unpack,
10247                         scheduleYield=self._schedule_yield,
10248                         unregister=self._unregister)
10249
10250                 self._prefetchers = weakref.WeakValueDictionary()
10251                 self._pkg_queue = []
10252                 self._completed_tasks = set()
10253
10254                 self._failed_pkgs = []
10255                 self._failed_pkgs_all = []
10256                 self._failed_pkgs_die_msgs = []
10257                 self._post_mod_echo_msgs = []
10258                 self._parallel_fetch = False
10259                 merge_count = len([x for x in mergelist \
10260                         if isinstance(x, Package) and x.operation == "merge"])
10261                 self._pkg_count = self._pkg_count_class(
10262                         curval=0, maxval=merge_count)
10263                 self._status_display.maxval = self._pkg_count.maxval
10264
10265                 # The load average takes some time to respond when new
10266                 # jobs are added, so we need to limit the rate of adding
10267                 # new jobs.
10268                 self._job_delay_max = 10
10269                 self._job_delay_factor = 1.0
10270                 self._job_delay_exp = 1.5
10271                 self._previous_job_start_time = None
10272
10273                 self._set_digraph(digraph)
10274
10275                 # This is used to memoize the _choose_pkg() result when
10276                 # no packages can be chosen until one of the existing
10277                 # jobs completes.
10278                 self._choose_pkg_return_early = False
10279
10280                 features = self.settings.features
10281                 if "parallel-fetch" in features and \
10282                         not ("--pretend" in self.myopts or \
10283                         "--fetch-all-uri" in self.myopts or \
10284                         "--fetchonly" in self.myopts):
10285                         if "distlocks" not in features:
10286                                 portage.writemsg(red("!!!")+"\n", noiselevel=-1)
10287                                 portage.writemsg(red("!!!")+" parallel-fetching " + \
10288                                         "requires the distlocks feature enabled"+"\n",
10289                                         noiselevel=-1)
10290                                 portage.writemsg(red("!!!")+" you have it disabled, " + \
10291                                         "thus parallel-fetching is being disabled"+"\n",
10292                                         noiselevel=-1)
10293                                 portage.writemsg(red("!!!")+"\n", noiselevel=-1)
10294                         elif len(mergelist) > 1:
10295                                 self._parallel_fetch = True
10296
10297                 if self._parallel_fetch:
10298                                 # clear out existing fetch log if it exists
10299                                 try:
10300                                         open(self._fetch_log, 'w')
10301                                 except EnvironmentError:
10302                                         pass
10303
10304                 self._running_portage = None
10305                 portage_match = self._running_root.trees["vartree"].dbapi.match(
10306                         portage.const.PORTAGE_PACKAGE_ATOM)
10307                 if portage_match:
10308                         cpv = portage_match.pop()
10309                         self._running_portage = self._pkg(cpv, "installed",
10310                                 self._running_root, installed=True)
10311
10312         def _poll(self, timeout=None):
10313                 self._schedule()
10314                 PollScheduler._poll(self, timeout=timeout)
10315
10316         def _set_max_jobs(self, max_jobs):
10317                 self._max_jobs = max_jobs
10318                 self._task_queues.jobs.max_jobs = max_jobs
10319
10320         def _background_mode(self):
10321                 """
10322                 Check if background mode is enabled and adjust states as necessary.
10323
10324                 @rtype: bool
10325                 @returns: True if background mode is enabled, False otherwise.
10326                 """
10327                 background = (self._max_jobs is True or \
10328                         self._max_jobs > 1 or "--quiet" in self.myopts) and \
10329                         not bool(self._opts_no_background.intersection(self.myopts))
10330
10331                 if background:
10332                         interactive_tasks = self._get_interactive_tasks()
10333                         if interactive_tasks:
10334                                 background = False
10335                                 writemsg_level(">>> Sending package output to stdio due " + \
10336                                         "to interactive package(s):\n",
10337                                         level=logging.INFO, noiselevel=-1)
10338                                 msg = [""]
10339                                 for pkg in interactive_tasks:
10340                                         pkg_str = "  " + colorize("INFORM", str(pkg.cpv))
10341                                         if pkg.root != "/":
10342                                                 pkg_str += " for " + pkg.root
10343                                         msg.append(pkg_str)
10344                                 msg.append("")
10345                                 writemsg_level("".join("%s\n" % (l,) for l in msg),
10346                                         level=logging.INFO, noiselevel=-1)
10347                                 if self._max_jobs is True or self._max_jobs > 1:
10348                                         self._set_max_jobs(1)
10349                                         writemsg_level(">>> Setting --jobs=1 due " + \
10350                                                 "to the above interactive package(s)\n",
10351                                                 level=logging.INFO, noiselevel=-1)
10352
10353                 self._status_display.quiet = \
10354                         not background or \
10355                         ("--quiet" in self.myopts and \
10356                         "--verbose" not in self.myopts)
10357
10358                 self._logger.xterm_titles = \
10359                         "notitles" not in self.settings.features and \
10360                         self._status_display.quiet
10361
10362                 return background
10363
10364         def _get_interactive_tasks(self):
10365                 from portage import flatten
10366                 from portage.dep import use_reduce, paren_reduce
10367                 interactive_tasks = []
10368                 for task in self._mergelist:
10369                         if not (isinstance(task, Package) and \
10370                                 task.operation == "merge"):
10371                                 continue
10372                         try:
10373                                 properties = flatten(use_reduce(paren_reduce(
10374                                         task.metadata["PROPERTIES"]), uselist=task.use.enabled))
10375                         except portage.exception.InvalidDependString, e:
10376                                 show_invalid_depstring_notice(task,
10377                                         task.metadata["PROPERTIES"], str(e))
10378                                 raise self._unknown_internal_error()
10379                         if "interactive" in properties:
10380                                 interactive_tasks.append(task)
10381                 return interactive_tasks
10382
10383         def _set_digraph(self, digraph):
10384                 if "--nodeps" in self.myopts or \
10385                         (self._max_jobs is not True and self._max_jobs < 2):
10386                         # save some memory
10387                         self._digraph = None
10388                         return
10389
10390                 self._digraph = digraph
10391                 self._find_system_deps()
10392                 self._prune_digraph()
10393                 self._prevent_builddir_collisions()
10394
10395         def _find_system_deps(self):
10396                 """
10397                 Find system packages and their deep runtime dependencies. Before being
10398                 merged, these packages go to merge_wait_queue, to be merged when no
10399                 other packages are building.
10400                 """
10401                 deep_system_deps = self._deep_system_deps
10402                 deep_system_deps.clear()
10403                 deep_system_deps.update(
10404                         _find_deep_system_runtime_deps(self._digraph))
10405                 deep_system_deps.difference_update([pkg for pkg in \
10406                         deep_system_deps if pkg.operation != "merge"])
10407
10408         def _prune_digraph(self):
10409                 """
10410                 Prune any root nodes that are irrelevant.
10411                 """
10412
10413                 graph = self._digraph
10414                 completed_tasks = self._completed_tasks
10415                 removed_nodes = set()
10416                 while True:
10417                         for node in graph.root_nodes():
10418                                 if not isinstance(node, Package) or \
10419                                         (node.installed and node.operation == "nomerge") or \
10420                                         node.onlydeps or \
10421                                         node in completed_tasks:
10422                                         removed_nodes.add(node)
10423                         if removed_nodes:
10424                                 graph.difference_update(removed_nodes)
10425                         if not removed_nodes:
10426                                 break
10427                         removed_nodes.clear()
10428
10429         def _prevent_builddir_collisions(self):
10430                 """
10431                 When building stages, sometimes the same exact cpv needs to be merged
10432                 to both $ROOTs. Add edges to the digraph in order to avoid collisions
10433                 in the builddir. Currently, normal file locks would be inappropriate
10434                 for this purpose since emerge holds all of it's build dir locks from
10435                 the main process.
10436                 """
10437                 cpv_map = {}
10438                 for pkg in self._mergelist:
10439                         if not isinstance(pkg, Package):
10440                                 # a satisfied blocker
10441                                 continue
10442                         if pkg.installed:
10443                                 continue
10444                         if pkg.cpv not in cpv_map:
10445                                 cpv_map[pkg.cpv] = [pkg]
10446                                 continue
10447                         for earlier_pkg in cpv_map[pkg.cpv]:
10448                                 self._digraph.add(earlier_pkg, pkg,
10449                                         priority=DepPriority(buildtime=True))
10450                         cpv_map[pkg.cpv].append(pkg)
10451
10452         class _pkg_failure(portage.exception.PortageException):
10453                 """
10454                 An instance of this class is raised by unmerge() when
10455                 an uninstallation fails.
10456                 """
10457                 status = 1
10458                 def __init__(self, *pargs):
10459                         portage.exception.PortageException.__init__(self, pargs)
10460                         if pargs:
10461                                 self.status = pargs[0]
10462
10463         def _schedule_fetch(self, fetcher):
10464                 """
10465                 Schedule a fetcher on the fetch queue, in order to
10466                 serialize access to the fetch log.
10467                 """
10468                 self._task_queues.fetch.addFront(fetcher)
10469
10470         def _schedule_setup(self, setup_phase):
10471                 """
10472                 Schedule a setup phase on the merge queue, in order to
10473                 serialize unsandboxed access to the live filesystem.
10474                 """
10475                 self._task_queues.merge.addFront(setup_phase)
10476                 self._schedule()
10477
10478         def _schedule_unpack(self, unpack_phase):
10479                 """
10480                 Schedule an unpack phase on the unpack queue, in order
10481                 to serialize $DISTDIR access for live ebuilds.
10482                 """
10483                 self._task_queues.unpack.add(unpack_phase)
10484
10485         def _find_blockers(self, new_pkg):
10486                 """
10487                 Returns a callable which should be called only when
10488                 the vdb lock has been acquired.
10489                 """
10490                 def get_blockers():
10491                         return self._find_blockers_with_lock(new_pkg, acquire_lock=0)
10492                 return get_blockers
10493
10494         def _find_blockers_with_lock(self, new_pkg, acquire_lock=0):
10495                 if self._opts_ignore_blockers.intersection(self.myopts):
10496                         return None
10497
10498                 # Call gc.collect() here to avoid heap overflow that
10499                 # triggers 'Cannot allocate memory' errors (reported
10500                 # with python-2.5).
10501                 import gc
10502                 gc.collect()
10503
10504                 blocker_db = self._blocker_db[new_pkg.root]
10505
10506                 blocker_dblinks = []
10507                 for blocking_pkg in blocker_db.findInstalledBlockers(
10508                         new_pkg, acquire_lock=acquire_lock):
10509                         if new_pkg.slot_atom == blocking_pkg.slot_atom:
10510                                 continue
10511                         if new_pkg.cpv == blocking_pkg.cpv:
10512                                 continue
10513                         blocker_dblinks.append(portage.dblink(
10514                                 blocking_pkg.category, blocking_pkg.pf, blocking_pkg.root,
10515                                 self.pkgsettings[blocking_pkg.root], treetype="vartree",
10516                                 vartree=self.trees[blocking_pkg.root]["vartree"]))
10517
10518                 gc.collect()
10519
10520                 return blocker_dblinks
10521
10522         def _dblink_pkg(self, pkg_dblink):
10523                 cpv = pkg_dblink.mycpv
10524                 type_name = RootConfig.tree_pkg_map[pkg_dblink.treetype]
10525                 root_config = self.trees[pkg_dblink.myroot]["root_config"]
10526                 installed = type_name == "installed"
10527                 return self._pkg(cpv, type_name, root_config, installed=installed)
10528
10529         def _append_to_log_path(self, log_path, msg):
10530                 f = open(log_path, 'a')
10531                 try:
10532                         f.write(msg)
10533                 finally:
10534                         f.close()
10535
10536         def _dblink_elog(self, pkg_dblink, phase, func, msgs):
10537
10538                 log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
10539                 log_file = None
10540                 out = sys.stdout
10541                 background = self._background
10542
10543                 if background and log_path is not None:
10544                         log_file = open(log_path, 'a')
10545                         out = log_file
10546
10547                 try:
10548                         for msg in msgs:
10549                                 func(msg, phase=phase, key=pkg_dblink.mycpv, out=out)
10550                 finally:
10551                         if log_file is not None:
10552                                 log_file.close()
10553
10554         def _dblink_emerge_log(self, msg):
10555                 self._logger.log(msg)
10556
10557         def _dblink_display_merge(self, pkg_dblink, msg, level=0, noiselevel=0):
10558                 log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
10559                 background = self._background
10560
10561                 if log_path is None:
10562                         if not (background and level < logging.WARN):
10563                                 portage.util.writemsg_level(msg,
10564                                         level=level, noiselevel=noiselevel)
10565                 else:
10566                         if not background:
10567                                 portage.util.writemsg_level(msg,
10568                                         level=level, noiselevel=noiselevel)
10569                         self._append_to_log_path(log_path, msg)
10570
10571         def _dblink_ebuild_phase(self,
10572                 pkg_dblink, pkg_dbapi, ebuild_path, phase):
10573                 """
10574                 Using this callback for merge phases allows the scheduler
10575                 to run while these phases execute asynchronously, and allows
10576                 the scheduler control output handling.
10577                 """
10578
10579                 scheduler = self._sched_iface
10580                 settings = pkg_dblink.settings
10581                 pkg = self._dblink_pkg(pkg_dblink)
10582                 background = self._background
10583                 log_path = settings.get("PORTAGE_LOG_FILE")
10584
10585                 ebuild_phase = EbuildPhase(background=background,
10586                         pkg=pkg, phase=phase, scheduler=scheduler,
10587                         settings=settings, tree=pkg_dblink.treetype)
10588                 ebuild_phase.start()
10589                 ebuild_phase.wait()
10590
10591                 return ebuild_phase.returncode
10592
10593         def _generate_digests(self):
10594                 """
10595                 Generate digests if necessary for --digests or FEATURES=digest.
10596                 In order to avoid interference, this must done before parallel
10597                 tasks are started.
10598                 """
10599
10600                 if '--fetchonly' in self.myopts:
10601                         return os.EX_OK
10602
10603                 digest = '--digest' in self.myopts
10604                 if not digest:
10605                         for pkgsettings in self.pkgsettings.itervalues():
10606                                 if 'digest' in pkgsettings.features:
10607                                         digest = True
10608                                         break
10609
10610                 if not digest:
10611                         return os.EX_OK
10612
10613                 for x in self._mergelist:
10614                         if not isinstance(x, Package) or \
10615                                 x.type_name != 'ebuild' or \
10616                                 x.operation != 'merge':
10617                                 continue
10618                         pkgsettings = self.pkgsettings[x.root]
10619                         if '--digest' not in self.myopts and \
10620                                 'digest' not in pkgsettings.features:
10621                                 continue
10622                         portdb = x.root_config.trees['porttree'].dbapi
10623                         ebuild_path = portdb.findname(x.cpv)
10624                         if not ebuild_path:
10625                                 writemsg_level(
10626                                         "!!! Could not locate ebuild for '%s'.\n" \
10627                                         % x.cpv, level=logging.ERROR, noiselevel=-1)
10628                                 return 1
10629                         pkgsettings['O'] = os.path.dirname(ebuild_path)
10630                         if not portage.digestgen([], pkgsettings, myportdb=portdb):
10631                                 writemsg_level(
10632                                         "!!! Unable to generate manifest for '%s'.\n" \
10633                                         % x.cpv, level=logging.ERROR, noiselevel=-1)
10634                                 return 1
10635
10636                 return os.EX_OK
10637
10638         def _check_manifests(self):
10639                 # Verify all the manifests now so that the user is notified of failure
10640                 # as soon as possible.
10641                 if "strict" not in self.settings.features or \
10642                         "--fetchonly" in self.myopts or \
10643                         "--fetch-all-uri" in self.myopts:
10644                         return os.EX_OK
10645
10646                 shown_verifying_msg = False
10647                 quiet_settings = {}
10648                 for myroot, pkgsettings in self.pkgsettings.iteritems():
10649                         quiet_config = portage.config(clone=pkgsettings)
10650                         quiet_config["PORTAGE_QUIET"] = "1"
10651                         quiet_config.backup_changes("PORTAGE_QUIET")
10652                         quiet_settings[myroot] = quiet_config
10653                         del quiet_config
10654
10655                 for x in self._mergelist:
10656                         if not isinstance(x, Package) or \
10657                                 x.type_name != "ebuild":
10658                                 continue
10659
10660                         if not shown_verifying_msg:
10661                                 shown_verifying_msg = True
10662                                 self._status_msg("Verifying ebuild manifests")
10663
10664                         root_config = x.root_config
10665                         portdb = root_config.trees["porttree"].dbapi
10666                         quiet_config = quiet_settings[root_config.root]
10667                         quiet_config["O"] = os.path.dirname(portdb.findname(x.cpv))
10668                         if not portage.digestcheck([], quiet_config, strict=True):
10669                                 return 1
10670
10671                 return os.EX_OK
10672
10673         def _add_prefetchers(self):
10674
10675                 if not self._parallel_fetch:
10676                         return
10677
10678                 if self._parallel_fetch:
10679                         self._status_msg("Starting parallel fetch")
10680
10681                         prefetchers = self._prefetchers
10682                         getbinpkg = "--getbinpkg" in self.myopts
10683
10684                         # In order to avoid "waiting for lock" messages
10685                         # at the beginning, which annoy users, never
10686                         # spawn a prefetcher for the first package.
10687                         for pkg in self._mergelist[1:]:
10688                                 prefetcher = self._create_prefetcher(pkg)
10689                                 if prefetcher is not None:
10690                                         self._task_queues.fetch.add(prefetcher)
10691                                         prefetchers[pkg] = prefetcher
10692
10693         def _create_prefetcher(self, pkg):
10694                 """
10695                 @return: a prefetcher, or None if not applicable
10696                 """
10697                 prefetcher = None
10698
10699                 if not isinstance(pkg, Package):
10700                         pass
10701
10702                 elif pkg.type_name == "ebuild":
10703
10704                         prefetcher = EbuildFetcher(background=True,
10705                                 config_pool=self._ConfigPool(pkg.root,
10706                                 self._allocate_config, self._deallocate_config),
10707                                 fetchonly=1, logfile=self._fetch_log,
10708                                 pkg=pkg, prefetch=True, scheduler=self._sched_iface)
10709
10710                 elif pkg.type_name == "binary" and \
10711                         "--getbinpkg" in self.myopts and \
10712                         pkg.root_config.trees["bintree"].isremote(pkg.cpv):
10713
10714                         prefetcher = BinpkgPrefetcher(background=True,
10715                                 pkg=pkg, scheduler=self._sched_iface)
10716
10717                 return prefetcher
10718
10719         def _is_restart_scheduled(self):
10720                 """
10721                 Check if the merge list contains a replacement
10722                 for the current running instance, that will result
10723                 in restart after merge.
10724                 @rtype: bool
10725                 @returns: True if a restart is scheduled, False otherwise.
10726                 """
10727                 if self._opts_no_restart.intersection(self.myopts):
10728                         return False
10729
10730                 mergelist = self._mergelist
10731
10732                 for i, pkg in enumerate(mergelist):
10733                         if self._is_restart_necessary(pkg) and \
10734                                 i != len(mergelist) - 1:
10735                                 return True
10736
10737                 return False
10738
10739         def _is_restart_necessary(self, pkg):
10740                 """
10741                 @return: True if merging the given package
10742                         requires restart, False otherwise.
10743                 """
10744
10745                 # Figure out if we need a restart.
10746                 if pkg.root == self._running_root.root and \
10747                         portage.match_from_list(
10748                         portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
10749                         if self._running_portage:
10750                                 return pkg.cpv != self._running_portage.cpv
10751                         return True
10752                 return False
10753
10754         def _restart_if_necessary(self, pkg):
10755                 """
10756                 Use execv() to restart emerge. This happens
10757                 if portage upgrades itself and there are
10758                 remaining packages in the list.
10759                 """
10760
10761                 if self._opts_no_restart.intersection(self.myopts):
10762                         return
10763
10764                 if not self._is_restart_necessary(pkg):
10765                         return
10766
10767                 if pkg == self._mergelist[-1]:
10768                         return
10769
10770                 self._main_loop_cleanup()
10771
10772                 logger = self._logger
10773                 pkg_count = self._pkg_count
10774                 mtimedb = self._mtimedb
10775                 bad_resume_opts = self._bad_resume_opts
10776
10777                 logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
10778                         (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
10779
10780                 logger.log(" *** RESTARTING " + \
10781                         "emerge via exec() after change of " + \
10782                         "portage version.")
10783
10784                 mtimedb["resume"]["mergelist"].remove(list(pkg))
10785                 mtimedb.commit()
10786                 portage.run_exitfuncs()
10787                 mynewargv = [sys.argv[0], "--resume"]
10788                 resume_opts = self.myopts.copy()
10789                 # For automatic resume, we need to prevent
10790                 # any of bad_resume_opts from leaking in
10791                 # via EMERGE_DEFAULT_OPTS.
10792                 resume_opts["--ignore-default-opts"] = True
10793                 for myopt, myarg in resume_opts.iteritems():
10794                         if myopt not in bad_resume_opts:
10795                                 if myarg is True:
10796                                         mynewargv.append(myopt)
10797                                 else:
10798                                         mynewargv.append(myopt +"="+ str(myarg))
10799                 # priority only needs to be adjusted on the first run
10800                 os.environ["PORTAGE_NICENESS"] = "0"
10801                 os.execv(mynewargv[0], mynewargv)
10802
10803         def merge(self):
10804
10805                 if "--resume" in self.myopts:
10806                         # We're resuming.
10807                         portage.writemsg_stdout(
10808                                 colorize("GOOD", "*** Resuming merge...\n"), noiselevel=-1)
10809                         self._logger.log(" *** Resuming merge...")
10810
10811                 self._save_resume_list()
10812
10813                 try:
10814                         self._background = self._background_mode()
10815                 except self._unknown_internal_error:
10816                         return 1
10817
10818                 for root in self.trees:
10819                         root_config = self.trees[root]["root_config"]
10820
10821                         # Even for --pretend --fetch mode, PORTAGE_TMPDIR is required
10822                         # since it might spawn pkg_nofetch which requires PORTAGE_BUILDDIR
10823                         # for ensuring sane $PWD (bug #239560) and storing elog messages.
10824                         tmpdir = root_config.settings.get("PORTAGE_TMPDIR", "")
10825                         if not tmpdir or not os.path.isdir(tmpdir):
10826                                 msg = "The directory specified in your " + \
10827                                         "PORTAGE_TMPDIR variable, '%s', " % tmpdir + \
10828                                 "does not exist. Please create this " + \
10829                                 "directory or correct your PORTAGE_TMPDIR setting."
10830                                 msg = textwrap.wrap(msg, 70)
10831                                 out = portage.output.EOutput()
10832                                 for l in msg:
10833                                         out.eerror(l)
10834                                 return 1
10835
10836                         if self._background:
10837                                 root_config.settings.unlock()
10838                                 root_config.settings["PORTAGE_BACKGROUND"] = "1"
10839                                 root_config.settings.backup_changes("PORTAGE_BACKGROUND")
10840                                 root_config.settings.lock()
10841
10842                         self.pkgsettings[root] = portage.config(
10843                                 clone=root_config.settings)
10844
10845                 rval = self._generate_digests()
10846                 if rval != os.EX_OK:
10847                         return rval
10848
10849                 rval = self._check_manifests()
10850                 if rval != os.EX_OK:
10851                         return rval
10852
10853                 keep_going = "--keep-going" in self.myopts
10854                 fetchonly = self._build_opts.fetchonly
10855                 mtimedb = self._mtimedb
10856                 failed_pkgs = self._failed_pkgs
10857
10858                 while True:
10859                         rval = self._merge()
10860                         if rval == os.EX_OK or fetchonly or not keep_going:
10861                                 break
10862                         if "resume" not in mtimedb:
10863                                 break
10864                         mergelist = self._mtimedb["resume"].get("mergelist")
10865                         if not mergelist:
10866                                 break
10867
10868                         if not failed_pkgs:
10869                                 break
10870
10871                         for failed_pkg in failed_pkgs:
10872                                 mergelist.remove(list(failed_pkg.pkg))
10873
10874                         self._failed_pkgs_all.extend(failed_pkgs)
10875                         del failed_pkgs[:]
10876
10877                         if not mergelist:
10878                                 break
10879
10880                         if not self._calc_resume_list():
10881                                 break
10882
10883                         clear_caches(self.trees)
10884                         if not self._mergelist:
10885                                 break
10886
10887                         self._save_resume_list()
10888                         self._pkg_count.curval = 0
10889                         self._pkg_count.maxval = len([x for x in self._mergelist \
10890                                 if isinstance(x, Package) and x.operation == "merge"])
10891                         self._status_display.maxval = self._pkg_count.maxval
10892
10893                 self._logger.log(" *** Finished. Cleaning up...")
10894
10895                 if failed_pkgs:
10896                         self._failed_pkgs_all.extend(failed_pkgs)
10897                         del failed_pkgs[:]
10898
10899                 background = self._background
10900                 failure_log_shown = False
10901                 if background and len(self._failed_pkgs_all) == 1:
10902                         # If only one package failed then just show it's
10903                         # whole log for easy viewing.
10904                         failed_pkg = self._failed_pkgs_all[-1]
10905                         build_dir = failed_pkg.build_dir
10906                         log_file = None
10907
10908                         log_paths = [failed_pkg.build_log]
10909
10910                         log_path = self._locate_failure_log(failed_pkg)
10911                         if log_path is not None:
10912                                 try:
10913                                         log_file = open(log_path)
10914                                 except IOError:
10915                                         pass
10916
10917                         if log_file is not None:
10918                                 try:
10919                                         for line in log_file:
10920                                                 writemsg_level(line, noiselevel=-1)
10921                                 finally:
10922                                         log_file.close()
10923                                 failure_log_shown = True
10924
10925                 # Dump mod_echo output now since it tends to flood the terminal.
10926                 # This allows us to avoid having more important output, generated
10927                 # later, from being swept away by the mod_echo output.
10928                 mod_echo_output =  _flush_elog_mod_echo()
10929
10930                 if background and not failure_log_shown and \
10931                         self._failed_pkgs_all and \
10932                         self._failed_pkgs_die_msgs and \
10933                         not mod_echo_output:
10934
10935                         printer = portage.output.EOutput()
10936                         for mysettings, key, logentries in self._failed_pkgs_die_msgs:
10937                                 root_msg = ""
10938                                 if mysettings["ROOT"] != "/":
10939                                         root_msg = " merged to %s" % mysettings["ROOT"]
10940                                 print
10941                                 printer.einfo("Error messages for package %s%s:" % \
10942                                         (colorize("INFORM", key), root_msg))
10943                                 print
10944                                 for phase in portage.const.EBUILD_PHASES:
10945                                         if phase not in logentries:
10946                                                 continue
10947                                         for msgtype, msgcontent in logentries[phase]:
10948                                                 if isinstance(msgcontent, basestring):
10949                                                         msgcontent = [msgcontent]
10950                                                 for line in msgcontent:
10951                                                         printer.eerror(line.strip("\n"))
10952
10953                 if self._post_mod_echo_msgs:
10954                         for msg in self._post_mod_echo_msgs:
10955                                 msg()
10956
10957                 if len(self._failed_pkgs_all) > 1 or \
10958                         (self._failed_pkgs_all and "--keep-going" in self.myopts):
10959                         if len(self._failed_pkgs_all) > 1:
10960                                 msg = "The following %d packages have " % \
10961                                         len(self._failed_pkgs_all) + \
10962                                         "failed to build or install:"
10963                         else:
10964                                 msg = "The following package has " + \
10965                                         "failed to build or install:"
10966                         prefix = bad(" * ")
10967                         writemsg(prefix + "\n", noiselevel=-1)
10968                         from textwrap import wrap
10969                         for line in wrap(msg, 72):
10970                                 writemsg("%s%s\n" % (prefix, line), noiselevel=-1)
10971                         writemsg(prefix + "\n", noiselevel=-1)
10972                         for failed_pkg in self._failed_pkgs_all:
10973                                 writemsg("%s\t%s\n" % (prefix,
10974                                         colorize("INFORM", str(failed_pkg.pkg))),
10975                                         noiselevel=-1)
10976                         writemsg(prefix + "\n", noiselevel=-1)
10977
10978                 return rval
10979
10980         def _elog_listener(self, mysettings, key, logentries, fulltext):
10981                 errors = portage.elog.filter_loglevels(logentries, ["ERROR"])
10982                 if errors:
10983                         self._failed_pkgs_die_msgs.append(
10984                                 (mysettings, key, errors))
10985
10986         def _locate_failure_log(self, failed_pkg):
10987
10988                 build_dir = failed_pkg.build_dir
10989                 log_file = None
10990
10991                 log_paths = [failed_pkg.build_log]
10992
10993                 for log_path in log_paths:
10994                         if not log_path:
10995                                 continue
10996
10997                         try:
10998                                 log_size = os.stat(log_path).st_size
10999                         except OSError:
11000                                 continue
11001
11002                         if log_size == 0:
11003                                 continue
11004
11005                         return log_path
11006
11007                 return None
11008
11009         def _add_packages(self):
11010                 pkg_queue = self._pkg_queue
11011                 for pkg in self._mergelist:
11012                         if isinstance(pkg, Package):
11013                                 pkg_queue.append(pkg)
11014                         elif isinstance(pkg, Blocker):
11015                                 pass
11016
11017         def _system_merge_started(self, merge):
11018                 """
11019                 Add any unsatisfied runtime deps to self._unsatisfied_system_deps.
11020                 """
11021                 graph = self._digraph
11022                 if graph is None:
11023                         return
11024                 pkg = merge.merge.pkg
11025
11026                 # Skip this if $ROOT != / since it shouldn't matter if there
11027                 # are unsatisfied system runtime deps in this case.
11028                 if pkg.root != '/':
11029                         return
11030
11031                 completed_tasks = self._completed_tasks
11032                 unsatisfied = self._unsatisfied_system_deps
11033
11034                 def ignore_non_runtime_or_satisfied(priority):
11035                         """
11036                         Ignore non-runtime and satisfied runtime priorities.
11037                         """
11038                         if isinstance(priority, DepPriority) and \
11039                                 not priority.satisfied and \
11040                                 (priority.runtime or priority.runtime_post):
11041                                 return False
11042                         return True
11043
11044                 # When checking for unsatisfied runtime deps, only check
11045                 # direct deps since indirect deps are checked when the
11046                 # corresponding parent is merged.
11047                 for child in graph.child_nodes(pkg,
11048                         ignore_priority=ignore_non_runtime_or_satisfied):
11049                         if not isinstance(child, Package) or \
11050                                 child.operation == 'uninstall':
11051                                 continue
11052                         if child is pkg:
11053                                 continue
11054                         if child.operation == 'merge' and \
11055                                 child not in completed_tasks:
11056                                 unsatisfied.add(child)
11057
11058         def _merge_wait_exit_handler(self, task):
11059                 self._merge_wait_scheduled.remove(task)
11060                 self._merge_exit(task)
11061
11062         def _merge_exit(self, merge):
11063                 self._do_merge_exit(merge)
11064                 self._deallocate_config(merge.merge.settings)
11065                 if merge.returncode == os.EX_OK and \
11066                         not merge.merge.pkg.installed:
11067                         self._status_display.curval += 1
11068                 self._status_display.merges = len(self._task_queues.merge)
11069                 self._schedule()
11070
11071         def _do_merge_exit(self, merge):
11072                 pkg = merge.merge.pkg
11073                 if merge.returncode != os.EX_OK:
11074                         settings = merge.merge.settings
11075                         build_dir = settings.get("PORTAGE_BUILDDIR")
11076                         build_log = settings.get("PORTAGE_LOG_FILE")
11077
11078                         self._failed_pkgs.append(self._failed_pkg(
11079                                 build_dir=build_dir, build_log=build_log,
11080                                 pkg=pkg,
11081                                 returncode=merge.returncode))
11082                         self._failed_pkg_msg(self._failed_pkgs[-1], "install", "to")
11083
11084                         self._status_display.failed = len(self._failed_pkgs)
11085                         return
11086
11087                 self._task_complete(pkg)
11088                 pkg_to_replace = merge.merge.pkg_to_replace
11089                 if pkg_to_replace is not None:
11090                         # When a package is replaced, mark it's uninstall
11091                         # task complete (if any).
11092                         uninst_hash_key = \
11093                                 ("installed", pkg.root, pkg_to_replace.cpv, "uninstall")
11094                         self._task_complete(uninst_hash_key)
11095
11096                 if pkg.installed:
11097                         return
11098
11099                 self._restart_if_necessary(pkg)
11100
11101                 # Call mtimedb.commit() after each merge so that
11102                 # --resume still works after being interrupted
11103                 # by reboot, sigkill or similar.
11104                 mtimedb = self._mtimedb
11105                 mtimedb["resume"]["mergelist"].remove(list(pkg))
11106                 if not mtimedb["resume"]["mergelist"]:
11107                         del mtimedb["resume"]
11108                 mtimedb.commit()
11109
11110         def _build_exit(self, build):
11111                 if build.returncode == os.EX_OK:
11112                         self.curval += 1
11113                         merge = PackageMerge(merge=build)
11114                         if not build.build_opts.buildpkgonly and \
11115                                 build.pkg in self._deep_system_deps:
11116                                 # Since dependencies on system packages are frequently
11117                                 # unspecified, merge them only when no builds are executing.
11118                                 self._merge_wait_queue.append(merge)
11119                                 merge.addStartListener(self._system_merge_started)
11120                         else:
11121                                 merge.addExitListener(self._merge_exit)
11122                                 self._task_queues.merge.add(merge)
11123                                 self._status_display.merges = len(self._task_queues.merge)
11124                 else:
11125                         settings = build.settings
11126                         build_dir = settings.get("PORTAGE_BUILDDIR")
11127                         build_log = settings.get("PORTAGE_LOG_FILE")
11128
11129                         self._failed_pkgs.append(self._failed_pkg(
11130                                 build_dir=build_dir, build_log=build_log,
11131                                 pkg=build.pkg,
11132                                 returncode=build.returncode))
11133                         self._failed_pkg_msg(self._failed_pkgs[-1], "emerge", "for")
11134
11135                         self._status_display.failed = len(self._failed_pkgs)
11136                         self._deallocate_config(build.settings)
11137                 self._jobs -= 1
11138                 self._status_display.running = self._jobs
11139                 self._schedule()
11140
11141         def _extract_exit(self, build):
11142                 self._build_exit(build)
11143
11144         def _task_complete(self, pkg):
11145                 self._completed_tasks.add(pkg)
11146                 self._unsatisfied_system_deps.discard(pkg)
11147                 self._choose_pkg_return_early = False
11148
11149         def _merge(self):
11150
11151                 self._add_prefetchers()
11152                 self._add_packages()
11153                 pkg_queue = self._pkg_queue
11154                 failed_pkgs = self._failed_pkgs
11155                 portage.locks._quiet = self._background
11156                 portage.elog._emerge_elog_listener = self._elog_listener
11157                 rval = os.EX_OK
11158
11159                 try:
11160                         self._main_loop()
11161                 finally:
11162                         self._main_loop_cleanup()
11163                         portage.locks._quiet = False
11164                         portage.elog._emerge_elog_listener = None
11165                         if failed_pkgs:
11166                                 rval = failed_pkgs[-1].returncode
11167
11168                 return rval
11169
11170         def _main_loop_cleanup(self):
11171                 del self._pkg_queue[:]
11172                 self._completed_tasks.clear()
11173                 self._deep_system_deps.clear()
11174                 self._unsatisfied_system_deps.clear()
11175                 self._choose_pkg_return_early = False
11176                 self._status_display.reset()
11177                 self._digraph = None
11178                 self._task_queues.fetch.clear()
11179
11180         def _choose_pkg(self):
11181                 """
11182                 Choose a task that has all it's dependencies satisfied.
11183                 """
11184
11185                 if self._choose_pkg_return_early:
11186                         return None
11187
11188                 if self._digraph is None:
11189                         if (self._jobs or self._task_queues.merge) and \
11190                                 not ("--nodeps" in self.myopts and \
11191                                 (self._max_jobs is True or self._max_jobs > 1)):
11192                                 self._choose_pkg_return_early = True
11193                                 return None
11194                         return self._pkg_queue.pop(0)
11195
11196                 if not (self._jobs or self._task_queues.merge):
11197                         return self._pkg_queue.pop(0)
11198
11199                 self._prune_digraph()
11200
11201                 chosen_pkg = None
11202                 later = set(self._pkg_queue)
11203                 for pkg in self._pkg_queue:
11204                         later.remove(pkg)
11205                         if not self._dependent_on_scheduled_merges(pkg, later):
11206                                 chosen_pkg = pkg
11207                                 break
11208
11209                 if chosen_pkg is not None:
11210                         self._pkg_queue.remove(chosen_pkg)
11211
11212                 if chosen_pkg is None:
11213                         # There's no point in searching for a package to
11214                         # choose until at least one of the existing jobs
11215                         # completes.
11216                         self._choose_pkg_return_early = True
11217
11218                 return chosen_pkg
11219
11220         def _dependent_on_scheduled_merges(self, pkg, later):
11221                 """
11222                 Traverse the subgraph of the given packages deep dependencies
11223                 to see if it contains any scheduled merges.
11224                 @param pkg: a package to check dependencies for
11225                 @type pkg: Package
11226                 @param later: packages for which dependence should be ignored
11227                         since they will be merged later than pkg anyway and therefore
11228                         delaying the merge of pkg will not result in a more optimal
11229                         merge order
11230                 @type later: set
11231                 @rtype: bool
11232                 @returns: True if the package is dependent, False otherwise.
11233                 """
11234
11235                 graph = self._digraph
11236                 completed_tasks = self._completed_tasks
11237
11238                 dependent = False
11239                 traversed_nodes = set([pkg])
11240                 direct_deps = graph.child_nodes(pkg)
11241                 node_stack = direct_deps
11242                 direct_deps = frozenset(direct_deps)
11243                 while node_stack:
11244                         node = node_stack.pop()
11245                         if node in traversed_nodes:
11246                                 continue
11247                         traversed_nodes.add(node)
11248                         if not ((node.installed and node.operation == "nomerge") or \
11249                                 (node.operation == "uninstall" and \
11250                                 node not in direct_deps) or \
11251                                 node in completed_tasks or \
11252                                 node in later):
11253                                 dependent = True
11254                                 break
11255                         node_stack.extend(graph.child_nodes(node))
11256
11257                 return dependent
11258
11259         def _allocate_config(self, root):
11260                 """
11261                 Allocate a unique config instance for a task in order
11262                 to prevent interference between parallel tasks.
11263                 """
11264                 if self._config_pool[root]:
11265                         temp_settings = self._config_pool[root].pop()
11266                 else:
11267                         temp_settings = portage.config(clone=self.pkgsettings[root])
11268                 # Since config.setcpv() isn't guaranteed to call config.reset() due to
11269                 # performance reasons, call it here to make sure all settings from the
11270                 # previous package get flushed out (such as PORTAGE_LOG_FILE).
11271                 temp_settings.reload()
11272                 temp_settings.reset()
11273                 return temp_settings
11274
11275         def _deallocate_config(self, settings):
11276                 self._config_pool[settings["ROOT"]].append(settings)
11277
11278         def _main_loop(self):
11279
11280                 # Only allow 1 job max if a restart is scheduled
11281                 # due to portage update.
11282                 if self._is_restart_scheduled() or \
11283                         self._opts_no_background.intersection(self.myopts):
11284                         self._set_max_jobs(1)
11285
11286                 merge_queue = self._task_queues.merge
11287
11288                 while self._schedule():
11289                         if self._poll_event_handlers:
11290                                 self._poll_loop()
11291
11292                 while True:
11293                         self._schedule()
11294                         if not (self._jobs or merge_queue):
11295                                 break
11296                         if self._poll_event_handlers:
11297                                 self._poll_loop()
11298
11299         def _keep_scheduling(self):
11300                 return bool(self._pkg_queue and \
11301                         not (self._failed_pkgs and not self._build_opts.fetchonly))
11302
11303         def _schedule_tasks(self):
11304
11305                 # When the number of jobs drops to zero, process all waiting merges.
11306                 if not self._jobs and self._merge_wait_queue:
11307                         for task in self._merge_wait_queue:
11308                                 task.addExitListener(self._merge_wait_exit_handler)
11309                                 self._task_queues.merge.add(task)
11310                         self._status_display.merges = len(self._task_queues.merge)
11311                         self._merge_wait_scheduled.extend(self._merge_wait_queue)
11312                         del self._merge_wait_queue[:]
11313
11314                 self._schedule_tasks_imp()
11315                 self._status_display.display()
11316
11317                 state_change = 0
11318                 for q in self._task_queues.values():
11319                         if q.schedule():
11320                                 state_change += 1
11321
11322                 # Cancel prefetchers if they're the only reason
11323                 # the main poll loop is still running.
11324                 if self._failed_pkgs and not self._build_opts.fetchonly and \
11325                         not (self._jobs or self._task_queues.merge) and \
11326                         self._task_queues.fetch:
11327                         self._task_queues.fetch.clear()
11328                         state_change += 1
11329
11330                 if state_change:
11331                         self._schedule_tasks_imp()
11332                         self._status_display.display()
11333
11334                 return self._keep_scheduling()
11335
11336         def _job_delay(self):
11337                 """
11338                 @rtype: bool
11339                 @returns: True if job scheduling should be delayed, False otherwise.
11340                 """
11341
11342                 if self._jobs and self._max_load is not None:
11343
11344                         current_time = time.time()
11345
11346                         delay = self._job_delay_factor * self._jobs ** self._job_delay_exp
11347                         if delay > self._job_delay_max:
11348                                 delay = self._job_delay_max
11349                         if (current_time - self._previous_job_start_time) < delay:
11350                                 return True
11351
11352                 return False
11353
11354         def _schedule_tasks_imp(self):
11355                 """
11356                 @rtype: bool
11357                 @returns: True if state changed, False otherwise.
11358                 """
11359
11360                 state_change = 0
11361
11362                 while True:
11363
11364                         if not self._keep_scheduling():
11365                                 return bool(state_change)
11366
11367                         if self._choose_pkg_return_early or \
11368                                 self._merge_wait_scheduled or \
11369                                 (self._jobs and self._unsatisfied_system_deps) or \
11370                                 not self._can_add_job() or \
11371                                 self._job_delay():
11372                                 return bool(state_change)
11373
11374                         pkg = self._choose_pkg()
11375                         if pkg is None:
11376                                 return bool(state_change)
11377
11378                         state_change += 1
11379
11380                         if not pkg.installed:
11381                                 self._pkg_count.curval += 1
11382
11383                         task = self._task(pkg)
11384
11385                         if pkg.installed:
11386                                 merge = PackageMerge(merge=task)
11387                                 merge.addExitListener(self._merge_exit)
11388                                 self._task_queues.merge.add(merge)
11389
11390                         elif pkg.built:
11391                                 self._jobs += 1
11392                                 self._previous_job_start_time = time.time()
11393                                 self._status_display.running = self._jobs
11394                                 task.addExitListener(self._extract_exit)
11395                                 self._task_queues.jobs.add(task)
11396
11397                         else:
11398                                 self._jobs += 1
11399                                 self._previous_job_start_time = time.time()
11400                                 self._status_display.running = self._jobs
11401                                 task.addExitListener(self._build_exit)
11402                                 self._task_queues.jobs.add(task)
11403
11404                 return bool(state_change)
11405
11406         def _task(self, pkg):
11407
11408                 pkg_to_replace = None
11409                 if pkg.operation != "uninstall":
11410                         vardb = pkg.root_config.trees["vartree"].dbapi
11411                         previous_cpv = vardb.match(pkg.slot_atom)
11412                         if previous_cpv:
11413                                 previous_cpv = previous_cpv.pop()
11414                                 pkg_to_replace = self._pkg(previous_cpv,
11415                                         "installed", pkg.root_config, installed=True)
11416
11417                 task = MergeListItem(args_set=self._args_set,
11418                         background=self._background, binpkg_opts=self._binpkg_opts,
11419                         build_opts=self._build_opts,
11420                         config_pool=self._ConfigPool(pkg.root,
11421                         self._allocate_config, self._deallocate_config),
11422                         emerge_opts=self.myopts,
11423                         find_blockers=self._find_blockers(pkg), logger=self._logger,
11424                         mtimedb=self._mtimedb, pkg=pkg, pkg_count=self._pkg_count.copy(),
11425                         pkg_to_replace=pkg_to_replace,
11426                         prefetcher=self._prefetchers.get(pkg),
11427                         scheduler=self._sched_iface,
11428                         settings=self._allocate_config(pkg.root),
11429                         statusMessage=self._status_msg,
11430                         world_atom=self._world_atom)
11431
11432                 return task
11433
11434         def _failed_pkg_msg(self, failed_pkg, action, preposition):
11435                 pkg = failed_pkg.pkg
11436                 msg = "%s to %s %s" % \
11437                         (bad("Failed"), action, colorize("INFORM", pkg.cpv))
11438                 if pkg.root != "/":
11439                         msg += " %s %s" % (preposition, pkg.root)
11440
11441                 log_path = self._locate_failure_log(failed_pkg)
11442                 if log_path is not None:
11443                         msg += ", Log file:"
11444                 self._status_msg(msg)
11445
11446                 if log_path is not None:
11447                         self._status_msg(" '%s'" % (colorize("INFORM", log_path),))
11448
11449         def _status_msg(self, msg):
11450                 """
11451                 Display a brief status message (no newlines) in the status display.
11452                 This is called by tasks to provide feedback to the user. This
11453                 delegates the resposibility of generating \r and \n control characters,
11454                 to guarantee that lines are created or erased when necessary and
11455                 appropriate.
11456
11457                 @type msg: str
11458                 @param msg: a brief status message (no newlines allowed)
11459                 """
11460                 if not self._background:
11461                         writemsg_level("\n")
11462                 self._status_display.displayMessage(msg)
11463
11464         def _save_resume_list(self):
11465                 """
11466                 Do this before verifying the ebuild Manifests since it might
11467                 be possible for the user to use --resume --skipfirst get past
11468                 a non-essential package with a broken digest.
11469                 """
11470                 mtimedb = self._mtimedb
11471                 mtimedb["resume"]["mergelist"] = [list(x) \
11472                         for x in self._mergelist \
11473                         if isinstance(x, Package) and x.operation == "merge"]
11474
11475                 mtimedb.commit()
11476
11477         def _calc_resume_list(self):
11478                 """
11479                 Use the current resume list to calculate a new one,
11480                 dropping any packages with unsatisfied deps.
11481                 @rtype: bool
11482                 @returns: True if successful, False otherwise.
11483                 """
11484                 print colorize("GOOD", "*** Resuming merge...")
11485
11486                 if self._show_list():
11487                         if "--tree" in self.myopts:
11488                                 portage.writemsg_stdout("\n" + \
11489                                         darkgreen("These are the packages that " + \
11490                                         "would be merged, in reverse order:\n\n"))
11491
11492                         else:
11493                                 portage.writemsg_stdout("\n" + \
11494                                         darkgreen("These are the packages that " + \
11495                                         "would be merged, in order:\n\n"))
11496
11497                 show_spinner = "--quiet" not in self.myopts and \
11498                         "--nodeps" not in self.myopts
11499
11500                 if show_spinner:
11501                         print "Calculating dependencies  ",
11502
11503                 myparams = create_depgraph_params(self.myopts, None)
11504                 success = False
11505                 e = None
11506                 try:
11507                         success, mydepgraph, dropped_tasks = resume_depgraph(
11508                                 self.settings, self.trees, self._mtimedb, self.myopts,
11509                                 myparams, self._spinner)
11510                 except depgraph.UnsatisfiedResumeDep, exc:
11511                         # rename variable to avoid python-3.0 error:
11512                         # SyntaxError: can not delete variable 'e' referenced in nested
11513                         #              scope
11514                         e = exc
11515                         mydepgraph = e.depgraph
11516                         dropped_tasks = set()
11517
11518                 if show_spinner:
11519                         print "\b\b... done!"
11520
11521                 if e is not None:
11522                         def unsatisfied_resume_dep_msg():
11523                                 mydepgraph.display_problems()
11524                                 out = portage.output.EOutput()
11525                                 out.eerror("One or more packages are either masked or " + \
11526                                         "have missing dependencies:")
11527                                 out.eerror("")
11528                                 indent = "  "
11529                                 show_parents = set()
11530                                 for dep in e.value:
11531                                         if dep.parent in show_parents:
11532                                                 continue
11533                                         show_parents.add(dep.parent)
11534                                         if dep.atom is None:
11535                                                 out.eerror(indent + "Masked package:")
11536                                                 out.eerror(2 * indent + str(dep.parent))
11537                                                 out.eerror("")
11538                                         else:
11539                                                 out.eerror(indent + str(dep.atom) + " pulled in by:")
11540                                                 out.eerror(2 * indent + str(dep.parent))
11541                                                 out.eerror("")
11542                                 msg = "The resume list contains packages " + \
11543                                         "that are either masked or have " + \
11544                                         "unsatisfied dependencies. " + \
11545                                         "Please restart/continue " + \
11546                                         "the operation manually, or use --skipfirst " + \
11547                                         "to skip the first package in the list and " + \
11548                                         "any other packages that may be " + \
11549                                         "masked or have missing dependencies."
11550                                 for line in textwrap.wrap(msg, 72):
11551                                         out.eerror(line)
11552                         self._post_mod_echo_msgs.append(unsatisfied_resume_dep_msg)
11553                         return False
11554
11555                 if success and self._show_list():
11556                         mylist = mydepgraph.altlist()
11557                         if mylist:
11558                                 if "--tree" in self.myopts:
11559                                         mylist.reverse()
11560                                 mydepgraph.display(mylist, favorites=self._favorites)
11561
11562                 if not success:
11563                         self._post_mod_echo_msgs.append(mydepgraph.display_problems)
11564                         return False
11565                 mydepgraph.display_problems()
11566
11567                 mylist = mydepgraph.altlist()
11568                 mydepgraph.break_refs(mylist)
11569                 mydepgraph.break_refs(dropped_tasks)
11570                 self._mergelist = mylist
11571                 self._set_digraph(mydepgraph.schedulerGraph())
11572
11573                 msg_width = 75
11574                 for task in dropped_tasks:
11575                         if not (isinstance(task, Package) and task.operation == "merge"):
11576                                 continue
11577                         pkg = task
11578                         msg = "emerge --keep-going:" + \
11579                                 " %s" % (pkg.cpv,)
11580                         if pkg.root != "/":
11581                                 msg += " for %s" % (pkg.root,)
11582                         msg += " dropped due to unsatisfied dependency."
11583                         for line in textwrap.wrap(msg, msg_width):
11584                                 eerror(line, phase="other", key=pkg.cpv)
11585                         settings = self.pkgsettings[pkg.root]
11586                         # Ensure that log collection from $T is disabled inside
11587                         # elog_process(), since any logs that might exist are
11588                         # not valid here.
11589                         settings.pop("T", None)
11590                         portage.elog.elog_process(pkg.cpv, settings)
11591                         self._failed_pkgs_all.append(self._failed_pkg(pkg=pkg))
11592
11593                 return True
11594
11595         def _show_list(self):
11596                 myopts = self.myopts
11597                 if "--quiet" not in myopts and \
11598                         ("--ask" in myopts or "--tree" in myopts or \
11599                         "--verbose" in myopts):
11600                         return True
11601                 return False
11602
11603         def _world_atom(self, pkg):
11604                 """
11605                 Add the package to the world file, but only if
11606                 it's supposed to be added. Otherwise, do nothing.
11607                 """
11608
11609                 if set(("--buildpkgonly", "--fetchonly",
11610                         "--fetch-all-uri",
11611                         "--oneshot", "--onlydeps",
11612                         "--pretend")).intersection(self.myopts):
11613                         return
11614
11615                 if pkg.root != self.target_root:
11616                         return
11617
11618                 args_set = self._args_set
11619                 if not args_set.findAtomForPackage(pkg):
11620                         return
11621
11622                 logger = self._logger
11623                 pkg_count = self._pkg_count
11624                 root_config = pkg.root_config
11625                 world_set = root_config.sets["world"]
11626                 world_locked = False
11627                 if hasattr(world_set, "lock"):
11628                         world_set.lock()
11629                         world_locked = True
11630
11631                 try:
11632                         if hasattr(world_set, "load"):
11633                                 world_set.load() # maybe it's changed on disk
11634
11635                         atom = create_world_atom(pkg, args_set, root_config)
11636                         if atom:
11637                                 if hasattr(world_set, "add"):
11638                                         self._status_msg(('Recording %s in "world" ' + \
11639                                                 'favorites file...') % atom)
11640                                         logger.log(" === (%s of %s) Updating world file (%s)" % \
11641                                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv))
11642                                         world_set.add(atom)
11643                                 else:
11644                                         writemsg_level('\n!!! Unable to record %s in "world"\n' % \
11645                                                 (atom,), level=logging.WARN, noiselevel=-1)
11646                 finally:
11647                         if world_locked:
11648                                 world_set.unlock()
11649
11650         def _pkg(self, cpv, type_name, root_config, installed=False):
11651                 """
11652                 Get a package instance from the cache, or create a new
11653                 one if necessary. Raises KeyError from aux_get if it
11654                 failures for some reason (package does not exist or is
11655                 corrupt).
11656                 """
11657                 operation = "merge"
11658                 if installed:
11659                         operation = "nomerge"
11660
11661                 if self._digraph is not None:
11662                         # Reuse existing instance when available.
11663                         pkg = self._digraph.get(
11664                                 (type_name, root_config.root, cpv, operation))
11665                         if pkg is not None:
11666                                 return pkg
11667
11668                 tree_type = depgraph.pkg_tree_map[type_name]
11669                 db = root_config.trees[tree_type].dbapi
11670                 db_keys = list(self.trees[root_config.root][
11671                         tree_type].dbapi._aux_cache_keys)
11672                 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
11673                 pkg = Package(cpv=cpv, metadata=metadata,
11674                         root_config=root_config, installed=installed)
11675                 if type_name == "ebuild":
11676                         settings = self.pkgsettings[root_config.root]
11677                         settings.setcpv(pkg)
11678                         pkg.metadata["USE"] = settings["PORTAGE_USE"]
11679                         pkg.metadata['CHOST'] = settings.get('CHOST', '')
11680
11681                 return pkg
11682
11683 class MetadataRegen(PollScheduler):
11684
11685         def __init__(self, portdb, cp_iter=None, consumer=None,
11686                 max_jobs=None, max_load=None):
11687                 PollScheduler.__init__(self)
11688                 self._portdb = portdb
11689                 self._global_cleanse = False
11690                 if cp_iter is None:
11691                         cp_iter = self._iter_every_cp()
11692                         # We can globally cleanse stale cache only if we
11693                         # iterate over every single cp.
11694                         self._global_cleanse = True
11695                 self._cp_iter = cp_iter
11696                 self._consumer = consumer
11697
11698                 if max_jobs is None:
11699                         max_jobs = 1
11700
11701                 self._max_jobs = max_jobs
11702                 self._max_load = max_load
11703                 self._sched_iface = self._sched_iface_class(
11704                         register=self._register,
11705                         schedule=self._schedule_wait,
11706                         unregister=self._unregister)
11707
11708                 self._valid_pkgs = set()
11709                 self._cp_set = set()
11710                 self._process_iter = self._iter_metadata_processes()
11711                 self.returncode = os.EX_OK
11712                 self._error_count = 0
11713
11714         def _iter_every_cp(self):
11715                 every_cp = self._portdb.cp_all()
11716                 every_cp.sort(reverse=True)
11717                 try:
11718                         while True:
11719                                 yield every_cp.pop()
11720                 except IndexError:
11721                         pass
11722
11723         def _iter_metadata_processes(self):
11724                 portdb = self._portdb
11725                 valid_pkgs = self._valid_pkgs
11726                 cp_set = self._cp_set
11727                 consumer = self._consumer
11728
11729                 for cp in self._cp_iter:
11730                         cp_set.add(cp)
11731                         portage.writemsg_stdout("Processing %s\n" % cp)
11732                         cpv_list = portdb.cp_list(cp)
11733                         for cpv in cpv_list:
11734                                 valid_pkgs.add(cpv)
11735                                 ebuild_path, repo_path = portdb.findname2(cpv)
11736                                 metadata, st, emtime = portdb._pull_valid_cache(
11737                                         cpv, ebuild_path, repo_path)
11738                                 if metadata is not None:
11739                                         if consumer is not None:
11740                                                 consumer(cpv, ebuild_path,
11741                                                         repo_path, metadata)
11742                                         continue
11743
11744                                 yield EbuildMetadataPhase(cpv=cpv, ebuild_path=ebuild_path,
11745                                         ebuild_mtime=emtime,
11746                                         metadata_callback=portdb._metadata_callback,
11747                                         portdb=portdb, repo_path=repo_path,
11748                                         settings=portdb.doebuild_settings)
11749
11750         def run(self):
11751
11752                 portdb = self._portdb
11753                 from portage.cache.cache_errors import CacheError
11754                 dead_nodes = {}
11755
11756                 while self._schedule():
11757                         self._poll_loop()
11758
11759                 while self._jobs:
11760                         self._poll_loop()
11761
11762                 if self._global_cleanse:
11763                         for mytree in portdb.porttrees:
11764                                 try:
11765                                         dead_nodes[mytree] = set(portdb.auxdb[mytree].iterkeys())
11766                                 except CacheError, e:
11767                                         portage.writemsg("Error listing cache entries for " + \
11768                                                 "'%s': %s, continuing...\n" % (mytree, e),
11769                                                 noiselevel=-1)
11770                                         del e
11771                                         dead_nodes = None
11772                                         break
11773                 else:
11774                         cp_set = self._cp_set
11775                         cpv_getkey = portage.cpv_getkey
11776                         for mytree in portdb.porttrees:
11777                                 try:
11778                                         dead_nodes[mytree] = set(cpv for cpv in \
11779                                                 portdb.auxdb[mytree].iterkeys() \
11780                                                 if cpv_getkey(cpv) in cp_set)
11781                                 except CacheError, e:
11782                                         portage.writemsg("Error listing cache entries for " + \
11783                                                 "'%s': %s, continuing...\n" % (mytree, e),
11784                                                 noiselevel=-1)
11785                                         del e
11786                                         dead_nodes = None
11787                                         break
11788
11789                 if dead_nodes:
11790                         for y in self._valid_pkgs:
11791                                 for mytree in portdb.porttrees:
11792                                         if portdb.findname2(y, mytree=mytree)[0]:
11793                                                 dead_nodes[mytree].discard(y)
11794
11795                         for mytree, nodes in dead_nodes.iteritems():
11796                                 auxdb = portdb.auxdb[mytree]
11797                                 for y in nodes:
11798                                         try:
11799                                                 del auxdb[y]
11800                                         except (KeyError, CacheError):
11801                                                 pass
11802
11803         def _schedule_tasks(self):
11804                 """
11805                 @rtype: bool
11806                 @returns: True if there may be remaining tasks to schedule,
11807                         False otherwise.
11808                 """
11809                 while self._can_add_job():
11810                         try:
11811                                 metadata_process = self._process_iter.next()
11812                         except StopIteration:
11813                                 return False
11814
11815                         self._jobs += 1
11816                         metadata_process.scheduler = self._sched_iface
11817                         metadata_process.addExitListener(self._metadata_exit)
11818                         metadata_process.start()
11819                 return True
11820
11821         def _metadata_exit(self, metadata_process):
11822                 self._jobs -= 1
11823                 if metadata_process.returncode != os.EX_OK:
11824                         self.returncode = 1
11825                         self._error_count += 1
11826                         self._valid_pkgs.discard(metadata_process.cpv)
11827                         portage.writemsg("Error processing %s, continuing...\n" % \
11828                                 (metadata_process.cpv,), noiselevel=-1)
11829
11830                 if self._consumer is not None:
11831                         # On failure, still notify the consumer (in this case the metadata
11832                         # argument is None).
11833                         self._consumer(metadata_process.cpv,
11834                                 metadata_process.ebuild_path,
11835                                 metadata_process.repo_path,
11836                                 metadata_process.metadata)
11837
11838                 self._schedule()
11839
11840 class UninstallFailure(portage.exception.PortageException):
11841         """
11842         An instance of this class is raised by unmerge() when
11843         an uninstallation fails.
11844         """
11845         status = 1
11846         def __init__(self, *pargs):
11847                 portage.exception.PortageException.__init__(self, pargs)
11848                 if pargs:
11849                         self.status = pargs[0]
11850
11851 def unmerge(root_config, myopts, unmerge_action,
11852         unmerge_files, ldpath_mtimes, autoclean=0,
11853         clean_world=1, clean_delay=1, ordered=0, raise_on_error=0,
11854         scheduler=None, writemsg_level=portage.util.writemsg_level):
11855
11856         if clean_world:
11857                 clean_world = myopts.get('--deselect') != 'n'
11858         quiet = "--quiet" in myopts
11859         settings = root_config.settings
11860         sets = root_config.sets
11861         vartree = root_config.trees["vartree"]
11862         candidate_catpkgs=[]
11863         global_unmerge=0
11864         xterm_titles = "notitles" not in settings.features
11865         out = portage.output.EOutput()
11866         pkg_cache = {}
11867         db_keys = list(vartree.dbapi._aux_cache_keys)
11868
11869         def _pkg(cpv):
11870                 pkg = pkg_cache.get(cpv)
11871                 if pkg is None:
11872                         pkg = Package(cpv=cpv, installed=True,
11873                                 metadata=izip(db_keys, vartree.dbapi.aux_get(cpv, db_keys)),
11874                                 root_config=root_config,
11875                                 type_name="installed")
11876                         pkg_cache[cpv] = pkg
11877                 return pkg
11878
11879         vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
11880         try:
11881                 # At least the parent needs to exist for the lock file.
11882                 portage.util.ensure_dirs(vdb_path)
11883         except portage.exception.PortageException:
11884                 pass
11885         vdb_lock = None
11886         try:
11887                 if os.access(vdb_path, os.W_OK):
11888                         vdb_lock = portage.locks.lockdir(vdb_path)
11889                 realsyslist = sets["system"].getAtoms()
11890                 syslist = []
11891                 for x in realsyslist:
11892                         mycp = portage.dep_getkey(x)
11893                         if mycp in settings.getvirtuals():
11894                                 providers = []
11895                                 for provider in settings.getvirtuals()[mycp]:
11896                                         if vartree.dbapi.match(provider):
11897                                                 providers.append(provider)
11898                                 if len(providers) == 1:
11899                                         syslist.extend(providers)
11900                         else:
11901                                 syslist.append(mycp)
11902         
11903                 mysettings = portage.config(clone=settings)
11904         
11905                 if not unmerge_files:
11906                         if unmerge_action == "unmerge":
11907                                 print
11908                                 print bold("emerge unmerge") + " can only be used with specific package names"
11909                                 print
11910                                 return 0
11911                         else:
11912                                 global_unmerge = 1
11913         
11914                 localtree = vartree
11915                 # process all arguments and add all
11916                 # valid db entries to candidate_catpkgs
11917                 if global_unmerge:
11918                         if not unmerge_files:
11919                                 candidate_catpkgs.extend(vartree.dbapi.cp_all())
11920                 else:
11921                         #we've got command-line arguments
11922                         if not unmerge_files:
11923                                 print "\nNo packages to unmerge have been provided.\n"
11924                                 return 0
11925                         for x in unmerge_files:
11926                                 arg_parts = x.split('/')
11927                                 if x[0] not in [".","/"] and \
11928                                         arg_parts[-1][-7:] != ".ebuild":
11929                                         #possible cat/pkg or dep; treat as such
11930                                         candidate_catpkgs.append(x)
11931                                 elif unmerge_action in ["prune","clean"]:
11932                                         print "\n!!! Prune and clean do not accept individual" + \
11933                                                 " ebuilds as arguments;\n    skipping.\n"
11934                                         continue
11935                                 else:
11936                                         # it appears that the user is specifying an installed
11937                                         # ebuild and we're in "unmerge" mode, so it's ok.
11938                                         if not os.path.exists(x):
11939                                                 print "\n!!! The path '"+x+"' doesn't exist.\n"
11940                                                 return 0
11941         
11942                                         absx   = os.path.abspath(x)
11943                                         sp_absx = absx.split("/")
11944                                         if sp_absx[-1][-7:] == ".ebuild":
11945                                                 del sp_absx[-1]
11946                                                 absx = "/".join(sp_absx)
11947         
11948                                         sp_absx_len = len(sp_absx)
11949         
11950                                         vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
11951                                         vdb_len  = len(vdb_path)
11952         
11953                                         sp_vdb     = vdb_path.split("/")
11954                                         sp_vdb_len = len(sp_vdb)
11955         
11956                                         if not os.path.exists(absx+"/CONTENTS"):
11957                                                 print "!!! Not a valid db dir: "+str(absx)
11958                                                 return 0
11959         
11960                                         if sp_absx_len <= sp_vdb_len:
11961                                                 # The Path is shorter... so it can't be inside the vdb.
11962                                                 print sp_absx
11963                                                 print absx
11964                                                 print "\n!!!",x,"cannot be inside "+ \
11965                                                         vdb_path+"; aborting.\n"
11966                                                 return 0
11967         
11968                                         for idx in range(0,sp_vdb_len):
11969                                                 if idx >= sp_absx_len or sp_vdb[idx] != sp_absx[idx]:
11970                                                         print sp_absx
11971                                                         print absx
11972                                                         print "\n!!!", x, "is not inside "+\
11973                                                                 vdb_path+"; aborting.\n"
11974                                                         return 0
11975         
11976                                         print "="+"/".join(sp_absx[sp_vdb_len:])
11977                                         candidate_catpkgs.append(
11978                                                 "="+"/".join(sp_absx[sp_vdb_len:]))
11979         
11980                 newline=""
11981                 if (not "--quiet" in myopts):
11982                         newline="\n"
11983                 if settings["ROOT"] != "/":
11984                         writemsg_level(darkgreen(newline+ \
11985                                 ">>> Using system located in ROOT tree %s\n" % \
11986                                 settings["ROOT"]))
11987
11988                 if (("--pretend" in myopts) or ("--ask" in myopts)) and \
11989                         not ("--quiet" in myopts):
11990                         writemsg_level(darkgreen(newline+\
11991                                 ">>> These are the packages that would be unmerged:\n"))
11992
11993                 # Preservation of order is required for --depclean and --prune so
11994                 # that dependencies are respected. Use all_selected to eliminate
11995                 # duplicate packages since the same package may be selected by
11996                 # multiple atoms.
11997                 pkgmap = []
11998                 all_selected = set()
11999                 for x in candidate_catpkgs:
12000                         # cycle through all our candidate deps and determine
12001                         # what will and will not get unmerged
12002                         try:
12003                                 mymatch = vartree.dbapi.match(x)
12004                         except portage.exception.AmbiguousPackageName, errpkgs:
12005                                 print "\n\n!!! The short ebuild name \"" + \
12006                                         x + "\" is ambiguous.  Please specify"
12007                                 print "!!! one of the following fully-qualified " + \
12008                                         "ebuild names instead:\n"
12009                                 for i in errpkgs[0]:
12010                                         print "    " + green(i)
12011                                 print
12012                                 sys.exit(1)
12013         
12014                         if not mymatch and x[0] not in "<>=~":
12015                                 mymatch = localtree.dep_match(x)
12016                         if not mymatch:
12017                                 portage.writemsg("\n--- Couldn't find '%s' to %s.\n" % \
12018                                         (x, unmerge_action), noiselevel=-1)
12019                                 continue
12020
12021                         pkgmap.append(
12022                                 {"protected": set(), "selected": set(), "omitted": set()})
12023                         mykey = len(pkgmap) - 1
12024                         if unmerge_action=="unmerge":
12025                                         for y in mymatch:
12026                                                 if y not in all_selected:
12027                                                         pkgmap[mykey]["selected"].add(y)
12028                                                         all_selected.add(y)
12029                         elif unmerge_action == "prune":
12030                                 if len(mymatch) == 1:
12031                                         continue
12032                                 best_version = mymatch[0]
12033                                 best_slot = vartree.getslot(best_version)
12034                                 best_counter = vartree.dbapi.cpv_counter(best_version)
12035                                 for mypkg in mymatch[1:]:
12036                                         myslot = vartree.getslot(mypkg)
12037                                         mycounter = vartree.dbapi.cpv_counter(mypkg)
12038                                         if (myslot == best_slot and mycounter > best_counter) or \
12039                                                 mypkg == portage.best([mypkg, best_version]):
12040                                                 if myslot == best_slot:
12041                                                         if mycounter < best_counter:
12042                                                                 # On slot collision, keep the one with the
12043                                                                 # highest counter since it is the most
12044                                                                 # recently installed.
12045                                                                 continue
12046                                                 best_version = mypkg
12047                                                 best_slot = myslot
12048                                                 best_counter = mycounter
12049                                 pkgmap[mykey]["protected"].add(best_version)
12050                                 pkgmap[mykey]["selected"].update(mypkg for mypkg in mymatch \
12051                                         if mypkg != best_version and mypkg not in all_selected)
12052                                 all_selected.update(pkgmap[mykey]["selected"])
12053                         else:
12054                                 # unmerge_action == "clean"
12055                                 slotmap={}
12056                                 for mypkg in mymatch:
12057                                         if unmerge_action == "clean":
12058                                                 myslot = localtree.getslot(mypkg)
12059                                         else:
12060                                                 # since we're pruning, we don't care about slots
12061                                                 # and put all the pkgs in together
12062                                                 myslot = 0
12063                                         if myslot not in slotmap:
12064                                                 slotmap[myslot] = {}
12065                                         slotmap[myslot][localtree.dbapi.cpv_counter(mypkg)] = mypkg
12066
12067                                 for mypkg in vartree.dbapi.cp_list(
12068                                         portage.dep_getkey(mymatch[0])):
12069                                         myslot = vartree.getslot(mypkg)
12070                                         if myslot not in slotmap:
12071                                                 slotmap[myslot] = {}
12072                                         slotmap[myslot][vartree.dbapi.cpv_counter(mypkg)] = mypkg
12073
12074                                 for myslot in slotmap:
12075                                         counterkeys = slotmap[myslot].keys()
12076                                         if not counterkeys:
12077                                                 continue
12078                                         counterkeys.sort()
12079                                         pkgmap[mykey]["protected"].add(
12080                                                 slotmap[myslot][counterkeys[-1]])
12081                                         del counterkeys[-1]
12082
12083                                         for counter in counterkeys[:]:
12084                                                 mypkg = slotmap[myslot][counter]
12085                                                 if mypkg not in mymatch:
12086                                                         counterkeys.remove(counter)
12087                                                         pkgmap[mykey]["protected"].add(
12088                                                                 slotmap[myslot][counter])
12089
12090                                         #be pretty and get them in order of merge:
12091                                         for ckey in counterkeys:
12092                                                 mypkg = slotmap[myslot][ckey]
12093                                                 if mypkg not in all_selected:
12094                                                         pkgmap[mykey]["selected"].add(mypkg)
12095                                                         all_selected.add(mypkg)
12096                                         # ok, now the last-merged package
12097                                         # is protected, and the rest are selected
12098                 numselected = len(all_selected)
12099                 if global_unmerge and not numselected:
12100                         portage.writemsg_stdout("\n>>> No outdated packages were found on your system.\n")
12101                         return 0
12102         
12103                 if not numselected:
12104                         portage.writemsg_stdout(
12105                                 "\n>>> No packages selected for removal by " + \
12106                                 unmerge_action + "\n")
12107                         return 0
12108         finally:
12109                 if vdb_lock:
12110                         vartree.dbapi.flush_cache()
12111                         portage.locks.unlockdir(vdb_lock)
12112         
12113         from portage.sets.base import EditablePackageSet
12114         
12115         # generate a list of package sets that are directly or indirectly listed in "world",
12116         # as there is no persistent list of "installed" sets
12117         installed_sets = ["world"]
12118         stop = False
12119         pos = 0
12120         while not stop:
12121                 stop = True
12122                 pos = len(installed_sets)
12123                 for s in installed_sets[pos - 1:]:
12124                         if s not in sets:
12125                                 continue
12126                         candidates = [x[len(SETPREFIX):] for x in sets[s].getNonAtoms() if x.startswith(SETPREFIX)]
12127                         if candidates:
12128                                 stop = False
12129                                 installed_sets += candidates
12130         installed_sets = [x for x in installed_sets if x not in root_config.setconfig.active]
12131         del stop, pos
12132
12133         # we don't want to unmerge packages that are still listed in user-editable package sets
12134         # listed in "world" as they would be remerged on the next update of "world" or the 
12135         # relevant package sets.
12136         unknown_sets = set()
12137         for cp in xrange(len(pkgmap)):
12138                 for cpv in pkgmap[cp]["selected"].copy():
12139                         try:
12140                                 pkg = _pkg(cpv)
12141                         except KeyError:
12142                                 # It could have been uninstalled
12143                                 # by a concurrent process.
12144                                 continue
12145
12146                         if unmerge_action != "clean" and \
12147                                 root_config.root == "/" and \
12148                                 portage.match_from_list(
12149                                 portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
12150                                 msg = ("Not unmerging package %s since there is no valid " + \
12151                                 "reason for portage to unmerge itself.") % (pkg.cpv,)
12152                                 for line in textwrap.wrap(msg, 75):
12153                                         out.eerror(line)
12154                                 # adjust pkgmap so the display output is correct
12155                                 pkgmap[cp]["selected"].remove(cpv)
12156                                 all_selected.remove(cpv)
12157                                 pkgmap[cp]["protected"].add(cpv)
12158                                 continue
12159
12160                         parents = []
12161                         for s in installed_sets:
12162                                 # skip sets that the user requested to unmerge, and skip world 
12163                                 # unless we're unmerging a package set (as the package would be 
12164                                 # removed from "world" later on)
12165                                 if s in root_config.setconfig.active or (s == "world" and not root_config.setconfig.active):
12166                                         continue
12167
12168                                 if s not in sets:
12169                                         if s in unknown_sets:
12170                                                 continue
12171                                         unknown_sets.add(s)
12172                                         out = portage.output.EOutput()
12173                                         out.eerror(("Unknown set '@%s' in " + \
12174                                                 "%svar/lib/portage/world_sets") % \
12175                                                 (s, root_config.root))
12176                                         continue
12177
12178                                 # only check instances of EditablePackageSet as other classes are generally used for
12179                                 # special purposes and can be ignored here (and are usually generated dynamically, so the
12180                                 # user can't do much about them anyway)
12181                                 if isinstance(sets[s], EditablePackageSet):
12182
12183                                         # This is derived from a snippet of code in the
12184                                         # depgraph._iter_atoms_for_pkg() method.
12185                                         for atom in sets[s].iterAtomsForPackage(pkg):
12186                                                 inst_matches = vartree.dbapi.match(atom)
12187                                                 inst_matches.reverse() # descending order
12188                                                 higher_slot = None
12189                                                 for inst_cpv in inst_matches:
12190                                                         try:
12191                                                                 inst_pkg = _pkg(inst_cpv)
12192                                                         except KeyError:
12193                                                                 # It could have been uninstalled
12194                                                                 # by a concurrent process.
12195                                                                 continue
12196
12197                                                         if inst_pkg.cp != atom.cp:
12198                                                                 continue
12199                                                         if pkg >= inst_pkg:
12200                                                                 # This is descending order, and we're not
12201                                                                 # interested in any versions <= pkg given.
12202                                                                 break
12203                                                         if pkg.slot_atom != inst_pkg.slot_atom:
12204                                                                 higher_slot = inst_pkg
12205                                                                 break
12206                                                 if higher_slot is None:
12207                                                         parents.append(s)
12208                                                         break
12209                         if parents:
12210                                 #print colorize("WARN", "Package %s is going to be unmerged," % cpv)
12211                                 #print colorize("WARN", "but still listed in the following package sets:")
12212                                 #print "    %s\n" % ", ".join(parents)
12213                                 print colorize("WARN", "Not unmerging package %s as it is" % cpv)
12214                                 print colorize("WARN", "still referenced by the following package sets:")
12215                                 print "    %s\n" % ", ".join(parents)
12216                                 # adjust pkgmap so the display output is correct
12217                                 pkgmap[cp]["selected"].remove(cpv)
12218                                 all_selected.remove(cpv)
12219                                 pkgmap[cp]["protected"].add(cpv)
12220         
12221         del installed_sets
12222
12223         numselected = len(all_selected)
12224         if not numselected:
12225                 writemsg_level(
12226                         "\n>>> No packages selected for removal by " + \
12227                         unmerge_action + "\n")
12228                 return 0
12229
12230         # Unmerge order only matters in some cases
12231         if not ordered:
12232                 unordered = {}
12233                 for d in pkgmap:
12234                         selected = d["selected"]
12235                         if not selected:
12236                                 continue
12237                         cp = portage.cpv_getkey(iter(selected).next())
12238                         cp_dict = unordered.get(cp)
12239                         if cp_dict is None:
12240                                 cp_dict = {}
12241                                 unordered[cp] = cp_dict
12242                                 for k in d:
12243                                         cp_dict[k] = set()
12244                         for k, v in d.iteritems():
12245                                 cp_dict[k].update(v)
12246                 pkgmap = [unordered[cp] for cp in sorted(unordered)]
12247
12248         for x in xrange(len(pkgmap)):
12249                 selected = pkgmap[x]["selected"]
12250                 if not selected:
12251                         continue
12252                 for mytype, mylist in pkgmap[x].iteritems():
12253                         if mytype == "selected":
12254                                 continue
12255                         mylist.difference_update(all_selected)
12256                 cp = portage.cpv_getkey(iter(selected).next())
12257                 for y in localtree.dep_match(cp):
12258                         if y not in pkgmap[x]["omitted"] and \
12259                                 y not in pkgmap[x]["selected"] and \
12260                                 y not in pkgmap[x]["protected"] and \
12261                                 y not in all_selected:
12262                                 pkgmap[x]["omitted"].add(y)
12263                 if global_unmerge and not pkgmap[x]["selected"]:
12264                         #avoid cluttering the preview printout with stuff that isn't getting unmerged
12265                         continue
12266                 if not (pkgmap[x]["protected"] or pkgmap[x]["omitted"]) and cp in syslist:
12267                         writemsg_level(colorize("BAD","\a\n\n!!! " + \
12268                                 "'%s' is part of your system profile.\n" % cp),
12269                                 level=logging.WARNING, noiselevel=-1)
12270                         writemsg_level(colorize("WARN","\a!!! Unmerging it may " + \
12271                                 "be damaging to your system.\n\n"),
12272                                 level=logging.WARNING, noiselevel=-1)
12273                         if clean_delay and "--pretend" not in myopts and "--ask" not in myopts:
12274                                 countdown(int(settings["EMERGE_WARNING_DELAY"]),
12275                                         colorize("UNMERGE_WARN", "Press Ctrl-C to Stop"))
12276                 if not quiet:
12277                         writemsg_level("\n %s\n" % (bold(cp),), noiselevel=-1)
12278                 else:
12279                         writemsg_level(bold(cp) + ": ", noiselevel=-1)
12280                 for mytype in ["selected","protected","omitted"]:
12281                         if not quiet:
12282                                 writemsg_level((mytype + ": ").rjust(14), noiselevel=-1)
12283                         if pkgmap[x][mytype]:
12284                                 sorted_pkgs = [portage.catpkgsplit(mypkg)[1:] for mypkg in pkgmap[x][mytype]]
12285                                 sorted_pkgs.sort(key=cmp_sort_key(portage.pkgcmp))
12286                                 for pn, ver, rev in sorted_pkgs:
12287                                         if rev == "r0":
12288                                                 myversion = ver
12289                                         else:
12290                                                 myversion = ver + "-" + rev
12291                                         if mytype == "selected":
12292                                                 writemsg_level(
12293                                                         colorize("UNMERGE_WARN", myversion + " "),
12294                                                         noiselevel=-1)
12295                                         else:
12296                                                 writemsg_level(
12297                                                         colorize("GOOD", myversion + " "), noiselevel=-1)
12298                         else:
12299                                 writemsg_level("none ", noiselevel=-1)
12300                         if not quiet:
12301                                 writemsg_level("\n", noiselevel=-1)
12302                 if quiet:
12303                         writemsg_level("\n", noiselevel=-1)
12304
12305         writemsg_level("\n>>> " + colorize("UNMERGE_WARN", "'Selected'") + \
12306                 " packages are slated for removal.\n")
12307         writemsg_level(">>> " + colorize("GOOD", "'Protected'") + \
12308                         " and " + colorize("GOOD", "'omitted'") + \
12309                         " packages will not be removed.\n\n")
12310
12311         if "--pretend" in myopts:
12312                 #we're done... return
12313                 return 0
12314         if "--ask" in myopts:
12315                 if userquery("Would you like to unmerge these packages?")=="No":
12316                         # enter pretend mode for correct formatting of results
12317                         myopts["--pretend"] = True
12318                         print
12319                         print "Quitting."
12320                         print
12321                         return 0
12322         #the real unmerging begins, after a short delay....
12323         if clean_delay and not autoclean:
12324                 countdown(int(settings["CLEAN_DELAY"]), ">>> Unmerging")
12325
12326         for x in xrange(len(pkgmap)):
12327                 for y in pkgmap[x]["selected"]:
12328                         writemsg_level(">>> Unmerging "+y+"...\n", noiselevel=-1)
12329                         emergelog(xterm_titles, "=== Unmerging... ("+y+")")
12330                         mysplit = y.split("/")
12331                         #unmerge...
12332                         retval = portage.unmerge(mysplit[0], mysplit[1], settings["ROOT"],
12333                                 mysettings, unmerge_action not in ["clean","prune"],
12334                                 vartree=vartree, ldpath_mtimes=ldpath_mtimes,
12335                                 scheduler=scheduler)
12336
12337                         if retval != os.EX_OK:
12338                                 emergelog(xterm_titles, " !!! unmerge FAILURE: "+y)
12339                                 if raise_on_error:
12340                                         raise UninstallFailure(retval)
12341                                 sys.exit(retval)
12342                         else:
12343                                 if clean_world and hasattr(sets["world"], "cleanPackage"):
12344                                         sets["world"].cleanPackage(vartree.dbapi, y)
12345                                 emergelog(xterm_titles, " >>> unmerge success: "+y)
12346         if clean_world and hasattr(sets["world"], "remove"):
12347                 for s in root_config.setconfig.active:
12348                         sets["world"].remove(SETPREFIX+s)
12349         return 1
12350
12351 def chk_updated_info_files(root, infodirs, prev_mtimes, retval):
12352
12353         if os.path.exists("/usr/bin/install-info"):
12354                 out = portage.output.EOutput()
12355                 regen_infodirs=[]
12356                 for z in infodirs:
12357                         if z=='':
12358                                 continue
12359                         inforoot=normpath(root+z)
12360                         if os.path.isdir(inforoot):
12361                                 infomtime = long(os.stat(inforoot).st_mtime)
12362                                 if inforoot not in prev_mtimes or \
12363                                         prev_mtimes[inforoot] != infomtime:
12364                                                 regen_infodirs.append(inforoot)
12365
12366                 if not regen_infodirs:
12367                         portage.writemsg_stdout("\n")
12368                         out.einfo("GNU info directory index is up-to-date.")
12369                 else:
12370                         portage.writemsg_stdout("\n")
12371                         out.einfo("Regenerating GNU info directory index...")
12372
12373                         dir_extensions = ("", ".gz", ".bz2")
12374                         icount=0
12375                         badcount=0
12376                         errmsg = ""
12377                         for inforoot in regen_infodirs:
12378                                 if inforoot=='':
12379                                         continue
12380
12381                                 if not os.path.isdir(inforoot) or \
12382                                         not os.access(inforoot, os.W_OK):
12383                                         continue
12384
12385                                 file_list = os.listdir(inforoot)
12386                                 file_list.sort()
12387                                 dir_file = os.path.join(inforoot, "dir")
12388                                 moved_old_dir = False
12389                                 processed_count = 0
12390                                 for x in file_list:
12391                                         if x.startswith(".") or \
12392                                                 os.path.isdir(os.path.join(inforoot, x)):
12393                                                 continue
12394                                         if x.startswith("dir"):
12395                                                 skip = False
12396                                                 for ext in dir_extensions:
12397                                                         if x == "dir" + ext or \
12398                                                                 x == "dir" + ext + ".old":
12399                                                                 skip = True
12400                                                                 break
12401                                                 if skip:
12402                                                         continue
12403                                         if processed_count == 0:
12404                                                 for ext in dir_extensions:
12405                                                         try:
12406                                                                 os.rename(dir_file + ext, dir_file + ext + ".old")
12407                                                                 moved_old_dir = True
12408                                                         except EnvironmentError, e:
12409                                                                 if e.errno != errno.ENOENT:
12410                                                                         raise
12411                                                                 del e
12412                                         processed_count += 1
12413                                         myso=commands.getstatusoutput("LANG=C LANGUAGE=C /usr/bin/install-info --dir-file="+inforoot+"/dir "+inforoot+"/"+x)[1]
12414                                         existsstr="already exists, for file `"
12415                                         if myso!="":
12416                                                 if re.search(existsstr,myso):
12417                                                         # Already exists... Don't increment the count for this.
12418                                                         pass
12419                                                 elif myso[:44]=="install-info: warning: no info dir entry in ":
12420                                                         # This info file doesn't contain a DIR-header: install-info produces this
12421                                                         # (harmless) warning (the --quiet switch doesn't seem to work).
12422                                                         # Don't increment the count for this.
12423                                                         pass
12424                                                 else:
12425                                                         badcount=badcount+1
12426                                                         errmsg += myso + "\n"
12427                                         icount=icount+1
12428
12429                                 if moved_old_dir and not os.path.exists(dir_file):
12430                                         # We didn't generate a new dir file, so put the old file
12431                                         # back where it was originally found.
12432                                         for ext in dir_extensions:
12433                                                 try:
12434                                                         os.rename(dir_file + ext + ".old", dir_file + ext)
12435                                                 except EnvironmentError, e:
12436                                                         if e.errno != errno.ENOENT:
12437                                                                 raise
12438                                                         del e
12439
12440                                 # Clean dir.old cruft so that they don't prevent
12441                                 # unmerge of otherwise empty directories.
12442                                 for ext in dir_extensions:
12443                                         try:
12444                                                 os.unlink(dir_file + ext + ".old")
12445                                         except EnvironmentError, e:
12446                                                 if e.errno != errno.ENOENT:
12447                                                         raise
12448                                                 del e
12449
12450                                 #update mtime so we can potentially avoid regenerating.
12451                                 prev_mtimes[inforoot] = long(os.stat(inforoot).st_mtime)
12452
12453                         if badcount:
12454                                 out.eerror("Processed %d info files; %d errors." % \
12455                                         (icount, badcount))
12456                                 writemsg_level(errmsg, level=logging.ERROR, noiselevel=-1)
12457                         else:
12458                                 if icount > 0:
12459                                         out.einfo("Processed %d info files." % (icount,))
12460
12461
12462 def display_news_notification(root_config, myopts):
12463         target_root = root_config.root
12464         trees = root_config.trees
12465         settings = trees["vartree"].settings
12466         portdb = trees["porttree"].dbapi
12467         vardb = trees["vartree"].dbapi
12468         NEWS_PATH = os.path.join("metadata", "news")
12469         UNREAD_PATH = os.path.join(target_root, NEWS_LIB_PATH, "news")
12470         newsReaderDisplay = False
12471         update = "--pretend" not in myopts
12472
12473         for repo in portdb.getRepositories():
12474                 unreadItems = checkUpdatedNewsItems(
12475                         portdb, vardb, NEWS_PATH, UNREAD_PATH, repo, update=update)
12476                 if unreadItems:
12477                         if not newsReaderDisplay:
12478                                 newsReaderDisplay = True
12479                                 print
12480                         print colorize("WARN", " * IMPORTANT:"),
12481                         print "%s news items need reading for repository '%s'." % (unreadItems, repo)
12482                         
12483         
12484         if newsReaderDisplay:
12485                 print colorize("WARN", " *"),
12486                 print "Use " + colorize("GOOD", "eselect news") + " to read news items."
12487                 print
12488
12489 def display_preserved_libs(vardbapi):
12490         MAX_DISPLAY = 3
12491
12492         # Ensure the registry is consistent with existing files.
12493         vardbapi.plib_registry.pruneNonExisting()
12494
12495         if vardbapi.plib_registry.hasEntries():
12496                 print
12497                 print colorize("WARN", "!!!") + " existing preserved libs:"
12498                 plibdata = vardbapi.plib_registry.getPreservedLibs()
12499                 linkmap = vardbapi.linkmap
12500                 consumer_map = {}
12501                 owners = {}
12502                 linkmap_broken = False
12503
12504                 try:
12505                         linkmap.rebuild()
12506                 except portage.exception.CommandNotFound, e:
12507                         writemsg_level("!!! Command Not Found: %s\n" % (e,),
12508                                 level=logging.ERROR, noiselevel=-1)
12509                         del e
12510                         linkmap_broken = True
12511                 else:
12512                         search_for_owners = set()
12513                         for cpv in plibdata:
12514                                 internal_plib_keys = set(linkmap._obj_key(f) \
12515                                         for f in plibdata[cpv])
12516                                 for f in plibdata[cpv]:
12517                                         if f in consumer_map:
12518                                                 continue
12519                                         consumers = []
12520                                         for c in linkmap.findConsumers(f):
12521                                                 # Filter out any consumers that are also preserved libs
12522                                                 # belonging to the same package as the provider.
12523                                                 if linkmap._obj_key(c) not in internal_plib_keys:
12524                                                         consumers.append(c)
12525                                         consumers.sort()
12526                                         consumer_map[f] = consumers
12527                                         search_for_owners.update(consumers[:MAX_DISPLAY+1])
12528
12529                         owners = vardbapi._owners.getFileOwnerMap(search_for_owners)
12530
12531                 for cpv in plibdata:
12532                         print colorize("WARN", ">>>") + " package: %s" % cpv
12533                         samefile_map = {}
12534                         for f in plibdata[cpv]:
12535                                 obj_key = linkmap._obj_key(f)
12536                                 alt_paths = samefile_map.get(obj_key)
12537                                 if alt_paths is None:
12538                                         alt_paths = set()
12539                                         samefile_map[obj_key] = alt_paths
12540                                 alt_paths.add(f)
12541
12542                         for alt_paths in samefile_map.itervalues():
12543                                 alt_paths = sorted(alt_paths)
12544                                 for p in alt_paths:
12545                                         print colorize("WARN", " * ") + " - %s" % (p,)
12546                                 f = alt_paths[0]
12547                                 consumers = consumer_map.get(f, [])
12548                                 for c in consumers[:MAX_DISPLAY]:
12549                                         print colorize("WARN", " * ") + "     used by %s (%s)" % \
12550                                                 (c, ", ".join(x.mycpv for x in owners.get(c, [])))
12551                                 if len(consumers) == MAX_DISPLAY + 1:
12552                                         print colorize("WARN", " * ") + "     used by %s (%s)" % \
12553                                                 (consumers[MAX_DISPLAY], ", ".join(x.mycpv \
12554                                                 for x in owners.get(consumers[MAX_DISPLAY], [])))
12555                                 elif len(consumers) > MAX_DISPLAY:
12556                                         print colorize("WARN", " * ") + "     used by %d other files" % (len(consumers) - MAX_DISPLAY)
12557                 print "Use " + colorize("GOOD", "emerge @preserved-rebuild") + " to rebuild packages using these libraries"
12558
12559
12560 def _flush_elog_mod_echo():
12561         """
12562         Dump the mod_echo output now so that our other
12563         notifications are shown last.
12564         @rtype: bool
12565         @returns: True if messages were shown, False otherwise.
12566         """
12567         messages_shown = False
12568         try:
12569                 from portage.elog import mod_echo
12570         except ImportError:
12571                 pass # happens during downgrade to a version without the module
12572         else:
12573                 messages_shown = bool(mod_echo._items)
12574                 mod_echo.finalize()
12575         return messages_shown
12576
12577 def post_emerge(root_config, myopts, mtimedb, retval):
12578         """
12579         Misc. things to run at the end of a merge session.
12580         
12581         Update Info Files
12582         Update Config Files
12583         Update News Items
12584         Commit mtimeDB
12585         Display preserved libs warnings
12586         Exit Emerge
12587
12588         @param trees: A dictionary mapping each ROOT to it's package databases
12589         @type trees: dict
12590         @param mtimedb: The mtimeDB to store data needed across merge invocations
12591         @type mtimedb: MtimeDB class instance
12592         @param retval: Emerge's return value
12593         @type retval: Int
12594         @rype: None
12595         @returns:
12596         1.  Calls sys.exit(retval)
12597         """
12598
12599         target_root = root_config.root
12600         trees = { target_root : root_config.trees }
12601         vardbapi = trees[target_root]["vartree"].dbapi
12602         settings = vardbapi.settings
12603         info_mtimes = mtimedb["info"]
12604
12605         # Load the most current variables from ${ROOT}/etc/profile.env
12606         settings.unlock()
12607         settings.reload()
12608         settings.regenerate()
12609         settings.lock()
12610
12611         config_protect = settings.get("CONFIG_PROTECT","").split()
12612         infodirs = settings.get("INFOPATH","").split(":") + \
12613                 settings.get("INFODIR","").split(":")
12614
12615         os.chdir("/")
12616
12617         if retval == os.EX_OK:
12618                 exit_msg = " *** exiting successfully."
12619         else:
12620                 exit_msg = " *** exiting unsuccessfully with status '%s'." % retval
12621         emergelog("notitles" not in settings.features, exit_msg)
12622
12623         _flush_elog_mod_echo()
12624
12625         counter_hash = settings.get("PORTAGE_COUNTER_HASH")
12626         if "--pretend" in myopts or (counter_hash is not None and \
12627                 counter_hash == vardbapi._counter_hash()):
12628                 display_news_notification(root_config, myopts)
12629                 # If vdb state has not changed then there's nothing else to do.
12630                 sys.exit(retval)
12631
12632         vdb_path = os.path.join(target_root, portage.VDB_PATH)
12633         portage.util.ensure_dirs(vdb_path)
12634         vdb_lock = None
12635         if os.access(vdb_path, os.W_OK) and not "--pretend" in myopts:
12636                 vdb_lock = portage.locks.lockdir(vdb_path)
12637
12638         if vdb_lock:
12639                 try:
12640                         if "noinfo" not in settings.features:
12641                                 chk_updated_info_files(target_root,
12642                                         infodirs, info_mtimes, retval)
12643                         mtimedb.commit()
12644                 finally:
12645                         if vdb_lock:
12646                                 portage.locks.unlockdir(vdb_lock)
12647
12648         chk_updated_cfg_files(target_root, config_protect)
12649         
12650         display_news_notification(root_config, myopts)
12651         if retval in (None, os.EX_OK) or (not "--pretend" in myopts):
12652                 display_preserved_libs(vardbapi)        
12653
12654         sys.exit(retval)
12655
12656
12657 def chk_updated_cfg_files(target_root, config_protect):
12658         if config_protect:
12659                 #number of directories with some protect files in them
12660                 procount=0
12661                 for x in config_protect:
12662                         x = os.path.join(target_root, x.lstrip(os.path.sep))
12663                         if not os.access(x, os.W_OK):
12664                                 # Avoid Permission denied errors generated
12665                                 # later by `find`.
12666                                 continue
12667                         try:
12668                                 mymode = os.lstat(x).st_mode
12669                         except OSError:
12670                                 continue
12671                         if stat.S_ISLNK(mymode):
12672                                 # We want to treat it like a directory if it
12673                                 # is a symlink to an existing directory.
12674                                 try:
12675                                         real_mode = os.stat(x).st_mode
12676                                         if stat.S_ISDIR(real_mode):
12677                                                 mymode = real_mode
12678                                 except OSError:
12679                                         pass
12680                         if stat.S_ISDIR(mymode):
12681                                 mycommand = "find '%s' -name '.*' -type d -prune -o -name '._cfg????_*'" % x
12682                         else:
12683                                 mycommand = "find '%s' -maxdepth 1 -name '._cfg????_%s'" % \
12684                                         os.path.split(x.rstrip(os.path.sep))
12685                         mycommand += " ! -name '.*~' ! -iname '.*.bak' -print0"
12686                         a = commands.getstatusoutput(mycommand)
12687                         if a[0] != 0:
12688                                 sys.stderr.write(" %s error scanning '%s': " % (bad("*"), x))
12689                                 sys.stderr.flush()
12690                                 # Show the error message alone, sending stdout to /dev/null.
12691                                 os.system(mycommand + " 1>/dev/null")
12692                         else:
12693                                 files = a[1].split('\0')
12694                                 # split always produces an empty string as the last element
12695                                 if files and not files[-1]:
12696                                         del files[-1]
12697                                 if files:
12698                                         procount += 1
12699                                         print "\n"+colorize("WARN", " * IMPORTANT:"),
12700                                         if stat.S_ISDIR(mymode):
12701                                                  print "%d config files in '%s' need updating." % \
12702                                                         (len(files), x)
12703                                         else:
12704                                                  print "config file '%s' needs updating." % x
12705
12706                 if procount:
12707                         print " "+yellow("*")+" See the "+colorize("INFORM","CONFIGURATION FILES")+ \
12708                                 " section of the " + bold("emerge")
12709                         print " "+yellow("*")+" man page to learn how to update config files."
12710
12711 def checkUpdatedNewsItems(portdb, vardb, NEWS_PATH, UNREAD_PATH, repo_id,
12712         update=False):
12713         """
12714         Examines news items in repodir + '/' + NEWS_PATH and attempts to find unread items
12715         Returns the number of unread (yet relevent) items.
12716         
12717         @param portdb: a portage tree database
12718         @type portdb: pordbapi
12719         @param vardb: an installed package database
12720         @type vardb: vardbapi
12721         @param NEWS_PATH:
12722         @type NEWS_PATH:
12723         @param UNREAD_PATH:
12724         @type UNREAD_PATH:
12725         @param repo_id:
12726         @type repo_id:
12727         @rtype: Integer
12728         @returns:
12729         1.  The number of unread but relevant news items.
12730         
12731         """
12732         from portage.news import NewsManager
12733         manager = NewsManager(portdb, vardb, NEWS_PATH, UNREAD_PATH)
12734         return manager.getUnreadItems( repo_id, update=update )
12735
12736 def insert_category_into_atom(atom, category):
12737         alphanum = re.search(r'\w', atom)
12738         if alphanum:
12739                 ret = atom[:alphanum.start()] + "%s/" % category + \
12740                         atom[alphanum.start():]
12741         else:
12742                 ret = None
12743         return ret
12744
12745 def is_valid_package_atom(x):
12746         if "/" not in x:
12747                 alphanum = re.search(r'\w', x)
12748                 if alphanum:
12749                         x = x[:alphanum.start()] + "cat/" + x[alphanum.start():]
12750         return portage.isvalidatom(x)
12751
12752 def show_blocker_docs_link():
12753         print
12754         print "For more information about " + bad("Blocked Packages") + ", please refer to the following"
12755         print "section of the Gentoo Linux x86 Handbook (architecture is irrelevant):"
12756         print
12757         print "http://www.gentoo.org/doc/en/handbook/handbook-x86.xml?full=1#blocked"
12758         print
12759
12760 def show_mask_docs():
12761         print "For more information, see the MASKED PACKAGES section in the emerge"
12762         print "man page or refer to the Gentoo Handbook."
12763
12764 def action_sync(settings, trees, mtimedb, myopts, myaction):
12765         xterm_titles = "notitles" not in settings.features
12766         emergelog(xterm_titles, " === sync")
12767         portdb = trees[settings["ROOT"]]["porttree"].dbapi
12768         myportdir = portdb.porttree_root
12769         out = portage.output.EOutput()
12770         if not myportdir:
12771                 sys.stderr.write("!!! PORTDIR is undefined.  Is /etc/make.globals missing?\n")
12772                 sys.exit(1)
12773         if myportdir[-1]=="/":
12774                 myportdir=myportdir[:-1]
12775         try:
12776                 st = os.stat(myportdir)
12777         except OSError:
12778                 st = None
12779         if st is None:
12780                 print ">>>",myportdir,"not found, creating it."
12781                 os.makedirs(myportdir,0755)
12782                 st = os.stat(myportdir)
12783
12784         spawn_kwargs = {}
12785         spawn_kwargs["env"] = settings.environ()
12786         if 'usersync' in settings.features and \
12787                 portage.data.secpass >= 2 and \
12788                 (st.st_uid != os.getuid() and st.st_mode & 0700 or \
12789                 st.st_gid != os.getgid() and st.st_mode & 0070):
12790                 try:
12791                         homedir = pwd.getpwuid(st.st_uid).pw_dir
12792                 except KeyError:
12793                         pass
12794                 else:
12795                         # Drop privileges when syncing, in order to match
12796                         # existing uid/gid settings.
12797                         spawn_kwargs["uid"]    = st.st_uid
12798                         spawn_kwargs["gid"]    = st.st_gid
12799                         spawn_kwargs["groups"] = [st.st_gid]
12800                         spawn_kwargs["env"]["HOME"] = homedir
12801                         umask = 0002
12802                         if not st.st_mode & 0020:
12803                                 umask = umask | 0020
12804                         spawn_kwargs["umask"] = umask
12805
12806         syncuri = settings.get("SYNC", "").strip()
12807         if not syncuri:
12808                 writemsg_level("!!! SYNC is undefined. Is /etc/make.globals missing?\n",
12809                         noiselevel=-1, level=logging.ERROR)
12810                 return 1
12811
12812         vcs_dirs = frozenset([".git", ".svn", "CVS", ".hg"])
12813         vcs_dirs = vcs_dirs.intersection(os.listdir(myportdir))
12814
12815         os.umask(0022)
12816         dosyncuri = syncuri
12817         updatecache_flg = False
12818         if myaction == "metadata":
12819                 print "skipping sync"
12820                 updatecache_flg = True
12821         elif ".git" in vcs_dirs:
12822                 # Update existing git repository, and ignore the syncuri. We are
12823                 # going to trust the user and assume that the user is in the branch
12824                 # that he/she wants updated. We'll let the user manage branches with
12825                 # git directly.
12826                 if portage.process.find_binary("git") is None:
12827                         msg = ["Command not found: git",
12828                         "Type \"emerge dev-util/git\" to enable git support."]
12829                         for l in msg:
12830                                 writemsg_level("!!! %s\n" % l,
12831                                         level=logging.ERROR, noiselevel=-1)
12832                         return 1
12833                 msg = ">>> Starting git pull in %s..." % myportdir
12834                 emergelog(xterm_titles, msg )
12835                 writemsg_level(msg + "\n")
12836                 exitcode = portage.process.spawn_bash("cd %s ; git pull" % \
12837                         (portage._shell_quote(myportdir),), **spawn_kwargs)
12838                 if exitcode != os.EX_OK:
12839                         msg = "!!! git pull error in %s." % myportdir
12840                         emergelog(xterm_titles, msg)
12841                         writemsg_level(msg + "\n", level=logging.ERROR, noiselevel=-1)
12842                         return exitcode
12843                 msg = ">>> Git pull in %s successful" % myportdir
12844                 emergelog(xterm_titles, msg)
12845                 writemsg_level(msg + "\n")
12846                 exitcode = git_sync_timestamps(settings, myportdir)
12847                 if exitcode == os.EX_OK:
12848                         updatecache_flg = True
12849         elif syncuri[:8]=="rsync://":
12850                 for vcs_dir in vcs_dirs:
12851                         writemsg_level(("!!! %s appears to be under revision " + \
12852                                 "control (contains %s).\n!!! Aborting rsync sync.\n") % \
12853                                 (myportdir, vcs_dir), level=logging.ERROR, noiselevel=-1)
12854                         return 1
12855                 if not os.path.exists("/usr/bin/rsync"):
12856                         print "!!! /usr/bin/rsync does not exist, so rsync support is disabled."
12857                         print "!!! Type \"emerge net-misc/rsync\" to enable rsync support."
12858                         sys.exit(1)
12859                 mytimeout=180
12860
12861                 rsync_opts = []
12862                 if settings["PORTAGE_RSYNC_OPTS"] == "":
12863                         portage.writemsg("PORTAGE_RSYNC_OPTS empty or unset, using hardcoded defaults\n")
12864                         rsync_opts.extend([
12865                                 "--recursive",    # Recurse directories
12866                                 "--links",        # Consider symlinks
12867                                 "--safe-links",   # Ignore links outside of tree
12868                                 "--perms",        # Preserve permissions
12869                                 "--times",        # Preserive mod times
12870                                 "--compress",     # Compress the data transmitted
12871                                 "--force",        # Force deletion on non-empty dirs
12872                                 "--whole-file",   # Don't do block transfers, only entire files
12873                                 "--delete",       # Delete files that aren't in the master tree
12874                                 "--stats",        # Show final statistics about what was transfered
12875                                 "--timeout="+str(mytimeout), # IO timeout if not done in X seconds
12876                                 "--exclude=/distfiles",   # Exclude distfiles from consideration
12877                                 "--exclude=/local",       # Exclude local     from consideration
12878                                 "--exclude=/packages",    # Exclude packages  from consideration
12879                         ])
12880
12881                 else:
12882                         # The below validation is not needed when using the above hardcoded
12883                         # defaults.
12884
12885                         portage.writemsg("Using PORTAGE_RSYNC_OPTS instead of hardcoded defaults\n", 1)
12886                         rsync_opts.extend(
12887                                 shlex.split(settings.get("PORTAGE_RSYNC_OPTS","")))
12888                         for opt in ("--recursive", "--times"):
12889                                 if opt not in rsync_opts:
12890                                         portage.writemsg(yellow("WARNING:") + " adding required option " + \
12891                                         "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
12892                                         rsync_opts.append(opt)
12893         
12894                         for exclude in ("distfiles", "local", "packages"):
12895                                 opt = "--exclude=/%s" % exclude
12896                                 if opt not in rsync_opts:
12897                                         portage.writemsg(yellow("WARNING:") + \
12898                                         " adding required option %s not included in "  % opt + \
12899                                         "PORTAGE_RSYNC_OPTS (can be overridden with --exclude='!')\n")
12900                                         rsync_opts.append(opt)
12901         
12902                         if syncuri.rstrip("/").endswith(".gentoo.org/gentoo-portage"):
12903                                 def rsync_opt_startswith(opt_prefix):
12904                                         for x in rsync_opts:
12905                                                 if x.startswith(opt_prefix):
12906                                                         return True
12907                                         return False
12908
12909                                 if not rsync_opt_startswith("--timeout="):
12910                                         rsync_opts.append("--timeout=%d" % mytimeout)
12911
12912                                 for opt in ("--compress", "--whole-file"):
12913                                         if opt not in rsync_opts:
12914                                                 portage.writemsg(yellow("WARNING:") + " adding required option " + \
12915                                                 "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
12916                                                 rsync_opts.append(opt)
12917
12918                 if "--quiet" in myopts:
12919                         rsync_opts.append("--quiet")    # Shut up a lot
12920                 else:
12921                         rsync_opts.append("--verbose")  # Print filelist
12922
12923                 if "--verbose" in myopts:
12924                         rsync_opts.append("--progress")  # Progress meter for each file
12925
12926                 if "--debug" in myopts:
12927                         rsync_opts.append("--checksum") # Force checksum on all files
12928
12929                 # Real local timestamp file.
12930                 servertimestampfile = os.path.join(
12931                         myportdir, "metadata", "timestamp.chk")
12932
12933                 content = portage.util.grabfile(servertimestampfile)
12934                 mytimestamp = 0
12935                 if content:
12936                         try:
12937                                 mytimestamp = time.mktime(time.strptime(content[0],
12938                                         "%a, %d %b %Y %H:%M:%S +0000"))
12939                         except (OverflowError, ValueError):
12940                                 pass
12941                 del content
12942
12943                 try:
12944                         rsync_initial_timeout = \
12945                                 int(settings.get("PORTAGE_RSYNC_INITIAL_TIMEOUT", "15"))
12946                 except ValueError:
12947                         rsync_initial_timeout = 15
12948
12949                 try:
12950                         maxretries=int(settings["PORTAGE_RSYNC_RETRIES"])
12951                 except SystemExit, e:
12952                         raise # Needed else can't exit
12953                 except:
12954                         maxretries=3 #default number of retries
12955
12956                 retries=0
12957                 user_name, hostname, port = re.split(
12958                         "rsync://([^:/]+@)?([^:/]*)(:[0-9]+)?", syncuri, maxsplit=3)[1:4]
12959                 if port is None:
12960                         port=""
12961                 if user_name is None:
12962                         user_name=""
12963                 updatecache_flg=True
12964                 all_rsync_opts = set(rsync_opts)
12965                 extra_rsync_opts = shlex.split(
12966                         settings.get("PORTAGE_RSYNC_EXTRA_OPTS",""))
12967                 all_rsync_opts.update(extra_rsync_opts)
12968                 family = socket.AF_INET
12969                 if "-4" in all_rsync_opts or "--ipv4" in all_rsync_opts:
12970                         family = socket.AF_INET
12971                 elif socket.has_ipv6 and \
12972                         ("-6" in all_rsync_opts or "--ipv6" in all_rsync_opts):
12973                         family = socket.AF_INET6
12974                 ips=[]
12975                 SERVER_OUT_OF_DATE = -1
12976                 EXCEEDED_MAX_RETRIES = -2
12977                 while (1):
12978                         if ips:
12979                                 del ips[0]
12980                         if ips==[]:
12981                                 try:
12982                                         for addrinfo in socket.getaddrinfo(
12983                                                 hostname, None, family, socket.SOCK_STREAM):
12984                                                 if socket.has_ipv6 and addrinfo[0] == socket.AF_INET6:
12985                                                         # IPv6 addresses need to be enclosed in square brackets
12986                                                         ips.append("[%s]" % addrinfo[4][0])
12987                                                 else:
12988                                                         ips.append(addrinfo[4][0])
12989                                         from random import shuffle
12990                                         shuffle(ips)
12991                                 except SystemExit, e:
12992                                         raise # Needed else can't exit
12993                                 except Exception, e:
12994                                         print "Notice:",str(e)
12995                                         dosyncuri=syncuri
12996
12997                         if ips:
12998                                 try:
12999                                         dosyncuri = syncuri.replace(
13000                                                 "//" + user_name + hostname + port + "/",
13001                                                 "//" + user_name + ips[0] + port + "/", 1)
13002                                 except SystemExit, e:
13003                                         raise # Needed else can't exit
13004                                 except Exception, e:
13005                                         print "Notice:",str(e)
13006                                         dosyncuri=syncuri
13007
13008                         if (retries==0):
13009                                 if "--ask" in myopts:
13010                                         if userquery("Do you want to sync your Portage tree with the mirror at\n" + blue(dosyncuri) + bold("?"))=="No":
13011                                                 print
13012                                                 print "Quitting."
13013                                                 print
13014                                                 sys.exit(0)
13015                                 emergelog(xterm_titles, ">>> Starting rsync with " + dosyncuri)
13016                                 if "--quiet" not in myopts:
13017                                         print ">>> Starting rsync with "+dosyncuri+"..."
13018                         else:
13019                                 emergelog(xterm_titles,
13020                                         ">>> Starting retry %d of %d with %s" % \
13021                                                 (retries,maxretries,dosyncuri))
13022                                 print "\n\n>>> Starting retry %d of %d with %s" % (retries,maxretries,dosyncuri)
13023
13024                         if mytimestamp != 0 and "--quiet" not in myopts:
13025                                 print ">>> Checking server timestamp ..."
13026
13027                         rsynccommand = ["/usr/bin/rsync"] + rsync_opts + extra_rsync_opts
13028
13029                         if "--debug" in myopts:
13030                                 print rsynccommand
13031
13032                         exitcode = os.EX_OK
13033                         servertimestamp = 0
13034                         # Even if there's no timestamp available locally, fetch the
13035                         # timestamp anyway as an initial probe to verify that the server is
13036                         # responsive.  This protects us from hanging indefinitely on a
13037                         # connection attempt to an unresponsive server which rsync's
13038                         # --timeout option does not prevent.
13039                         if True:
13040                                 # Temporary file for remote server timestamp comparison.
13041                                 from tempfile import mkstemp
13042                                 fd, tmpservertimestampfile = mkstemp()
13043                                 os.close(fd)
13044                                 mycommand = rsynccommand[:]
13045                                 mycommand.append(dosyncuri.rstrip("/") + \
13046                                         "/metadata/timestamp.chk")
13047                                 mycommand.append(tmpservertimestampfile)
13048                                 content = None
13049                                 mypids = []
13050                                 try:
13051                                         def timeout_handler(signum, frame):
13052                                                 raise portage.exception.PortageException("timed out")
13053                                         signal.signal(signal.SIGALRM, timeout_handler)
13054                                         # Timeout here in case the server is unresponsive.  The
13055                                         # --timeout rsync option doesn't apply to the initial
13056                                         # connection attempt.
13057                                         if rsync_initial_timeout:
13058                                                 signal.alarm(rsync_initial_timeout)
13059                                         try:
13060                                                 mypids.extend(portage.process.spawn(
13061                                                         mycommand, env=settings.environ(), returnpid=True))
13062                                                 exitcode = os.waitpid(mypids[0], 0)[1]
13063                                                 content = portage.grabfile(tmpservertimestampfile)
13064                                         finally:
13065                                                 if rsync_initial_timeout:
13066                                                         signal.alarm(0)
13067                                                 try:
13068                                                         os.unlink(tmpservertimestampfile)
13069                                                 except OSError:
13070                                                         pass
13071                                 except portage.exception.PortageException, e:
13072                                         # timed out
13073                                         print e
13074                                         del e
13075                                         if mypids and os.waitpid(mypids[0], os.WNOHANG) == (0,0):
13076                                                 os.kill(mypids[0], signal.SIGTERM)
13077                                                 os.waitpid(mypids[0], 0)
13078                                         # This is the same code rsync uses for timeout.
13079                                         exitcode = 30
13080                                 else:
13081                                         if exitcode != os.EX_OK:
13082                                                 if exitcode & 0xff:
13083                                                         exitcode = (exitcode & 0xff) << 8
13084                                                 else:
13085                                                         exitcode = exitcode >> 8
13086                                 if mypids:
13087                                         portage.process.spawned_pids.remove(mypids[0])
13088                                 if content:
13089                                         try:
13090                                                 servertimestamp = time.mktime(time.strptime(
13091                                                         content[0], "%a, %d %b %Y %H:%M:%S +0000"))
13092                                         except (OverflowError, ValueError):
13093                                                 pass
13094                                 del mycommand, mypids, content
13095                         if exitcode == os.EX_OK:
13096                                 if (servertimestamp != 0) and (servertimestamp == mytimestamp):
13097                                         emergelog(xterm_titles,
13098                                                 ">>> Cancelling sync -- Already current.")
13099                                         print
13100                                         print ">>>"
13101                                         print ">>> Timestamps on the server and in the local repository are the same."
13102                                         print ">>> Cancelling all further sync action. You are already up to date."
13103                                         print ">>>"
13104                                         print ">>> In order to force sync, remove '%s'." % servertimestampfile
13105                                         print ">>>"
13106                                         print
13107                                         sys.exit(0)
13108                                 elif (servertimestamp != 0) and (servertimestamp < mytimestamp):
13109                                         emergelog(xterm_titles,
13110                                                 ">>> Server out of date: %s" % dosyncuri)
13111                                         print
13112                                         print ">>>"
13113                                         print ">>> SERVER OUT OF DATE: %s" % dosyncuri
13114                                         print ">>>"
13115                                         print ">>> In order to force sync, remove '%s'." % servertimestampfile
13116                                         print ">>>"
13117                                         print
13118                                         exitcode = SERVER_OUT_OF_DATE
13119                                 elif (servertimestamp == 0) or (servertimestamp > mytimestamp):
13120                                         # actual sync
13121                                         mycommand = rsynccommand + [dosyncuri+"/", myportdir]
13122                                         exitcode = portage.process.spawn(mycommand, **spawn_kwargs)
13123                                         if exitcode in [0,1,3,4,11,14,20,21]:
13124                                                 break
13125                         elif exitcode in [1,3,4,11,14,20,21]:
13126                                 break
13127                         else:
13128                                 # Code 2 indicates protocol incompatibility, which is expected
13129                                 # for servers with protocol < 29 that don't support
13130                                 # --prune-empty-directories.  Retry for a server that supports
13131                                 # at least rsync protocol version 29 (>=rsync-2.6.4).
13132                                 pass
13133
13134                         retries=retries+1
13135
13136                         if retries<=maxretries:
13137                                 print ">>> Retrying..."
13138                                 time.sleep(11)
13139                         else:
13140                                 # over retries
13141                                 # exit loop
13142                                 updatecache_flg=False
13143                                 exitcode = EXCEEDED_MAX_RETRIES
13144                                 break
13145
13146                 if (exitcode==0):
13147                         emergelog(xterm_titles, "=== Sync completed with %s" % dosyncuri)
13148                 elif exitcode == SERVER_OUT_OF_DATE:
13149                         sys.exit(1)
13150                 elif exitcode == EXCEEDED_MAX_RETRIES:
13151                         sys.stderr.write(
13152                                 ">>> Exceeded PORTAGE_RSYNC_RETRIES: %s\n" % maxretries)
13153                         sys.exit(1)
13154                 elif (exitcode>0):
13155                         msg = []
13156                         if exitcode==1:
13157                                 msg.append("Rsync has reported that there is a syntax error. Please ensure")
13158                                 msg.append("that your SYNC statement is proper.")
13159                                 msg.append("SYNC=" + settings["SYNC"])
13160                         elif exitcode==11:
13161                                 msg.append("Rsync has reported that there is a File IO error. Normally")
13162                                 msg.append("this means your disk is full, but can be caused by corruption")
13163                                 msg.append("on the filesystem that contains PORTDIR. Please investigate")
13164                                 msg.append("and try again after the problem has been fixed.")
13165                                 msg.append("PORTDIR=" + settings["PORTDIR"])
13166                         elif exitcode==20:
13167                                 msg.append("Rsync was killed before it finished.")
13168                         else:
13169                                 msg.append("Rsync has not successfully finished. It is recommended that you keep")
13170                                 msg.append("trying or that you use the 'emerge-webrsync' option if you are unable")
13171                                 msg.append("to use rsync due to firewall or other restrictions. This should be a")
13172                                 msg.append("temporary problem unless complications exist with your network")
13173                                 msg.append("(and possibly your system's filesystem) configuration.")
13174                         for line in msg:
13175                                 out.eerror(line)
13176                         sys.exit(exitcode)
13177         elif syncuri[:6]=="cvs://":
13178                 if not os.path.exists("/usr/bin/cvs"):
13179                         print "!!! /usr/bin/cvs does not exist, so CVS support is disabled."
13180                         print "!!! Type \"emerge dev-util/cvs\" to enable CVS support."
13181                         sys.exit(1)
13182                 cvsroot=syncuri[6:]
13183                 cvsdir=os.path.dirname(myportdir)
13184                 if not os.path.exists(myportdir+"/CVS"):
13185                         #initial checkout
13186                         print ">>> Starting initial cvs checkout with "+syncuri+"..."
13187                         if os.path.exists(cvsdir+"/gentoo-x86"):
13188                                 print "!!! existing",cvsdir+"/gentoo-x86 directory; exiting."
13189                                 sys.exit(1)
13190                         try:
13191                                 os.rmdir(myportdir)
13192                         except OSError, e:
13193                                 if e.errno != errno.ENOENT:
13194                                         sys.stderr.write(
13195                                                 "!!! existing '%s' directory; exiting.\n" % myportdir)
13196                                         sys.exit(1)
13197                                 del e
13198                         if portage.spawn("cd "+cvsdir+"; cvs -z0 -d "+cvsroot+" co -P gentoo-x86",settings,free=1):
13199                                 print "!!! cvs checkout error; exiting."
13200                                 sys.exit(1)
13201                         os.rename(os.path.join(cvsdir, "gentoo-x86"), myportdir)
13202                 else:
13203                         #cvs update
13204                         print ">>> Starting cvs update with "+syncuri+"..."
13205                         retval = portage.process.spawn_bash(
13206                                 "cd %s; cvs -z0 -q update -dP" % \
13207                                 (portage._shell_quote(myportdir),), **spawn_kwargs)
13208                         if retval != os.EX_OK:
13209                                 sys.exit(retval)
13210                 dosyncuri = syncuri
13211         else:
13212                 writemsg_level("!!! Unrecognized protocol: SYNC='%s'\n" % (syncuri,),
13213                         noiselevel=-1, level=logging.ERROR)
13214                 return 1
13215
13216         if updatecache_flg and  \
13217                 myaction != "metadata" and \
13218                 "metadata-transfer" not in settings.features:
13219                 updatecache_flg = False
13220
13221         # Reload the whole config from scratch.
13222         settings, trees, mtimedb = load_emerge_config(trees=trees)
13223         root_config = trees[settings["ROOT"]]["root_config"]
13224         portdb = trees[settings["ROOT"]]["porttree"].dbapi
13225
13226         if updatecache_flg and \
13227                 os.path.exists(os.path.join(myportdir, 'metadata', 'cache')):
13228
13229                 # Only update cache for myportdir since that's
13230                 # the only one that's been synced here.
13231                 action_metadata(settings, portdb, myopts, porttrees=[myportdir])
13232
13233         if portage._global_updates(trees, mtimedb["updates"]):
13234                 mtimedb.commit()
13235                 # Reload the whole config from scratch.
13236                 settings, trees, mtimedb = load_emerge_config(trees=trees)
13237                 portdb = trees[settings["ROOT"]]["porttree"].dbapi
13238                 root_config = trees[settings["ROOT"]]["root_config"]
13239
13240         mybestpv = portdb.xmatch("bestmatch-visible",
13241                 portage.const.PORTAGE_PACKAGE_ATOM)
13242         mypvs = portage.best(
13243                 trees[settings["ROOT"]]["vartree"].dbapi.match(
13244                 portage.const.PORTAGE_PACKAGE_ATOM))
13245
13246         chk_updated_cfg_files("/", settings.get("CONFIG_PROTECT","").split())
13247
13248         if myaction != "metadata":
13249                 if os.access(portage.USER_CONFIG_PATH + "/bin/post_sync", os.X_OK):
13250                         retval = portage.process.spawn(
13251                                 [os.path.join(portage.USER_CONFIG_PATH, "bin", "post_sync"),
13252                                 dosyncuri], env=settings.environ())
13253                         if retval != os.EX_OK:
13254                                 print red(" * ")+bold("spawn failed of "+ portage.USER_CONFIG_PATH + "/bin/post_sync")
13255
13256         if(mybestpv != mypvs) and not "--quiet" in myopts:
13257                 print
13258                 print red(" * ")+bold("An update to portage is available.")+" It is _highly_ recommended"
13259                 print red(" * ")+"that you update portage now, before any other packages are updated."
13260                 print
13261                 print red(" * ")+"To update portage, run 'emerge portage' now."
13262                 print
13263         
13264         display_news_notification(root_config, myopts)
13265         return os.EX_OK
13266
13267 def git_sync_timestamps(settings, portdir):
13268         """
13269         Since git doesn't preserve timestamps, synchronize timestamps between
13270         entries and ebuilds/eclasses. Assume the cache has the correct timestamp
13271         for a given file as long as the file in the working tree is not modified
13272         (relative to HEAD).
13273         """
13274         cache_dir = os.path.join(portdir, "metadata", "cache")
13275         if not os.path.isdir(cache_dir):
13276                 return os.EX_OK
13277         writemsg_level(">>> Synchronizing timestamps...\n")
13278
13279         from portage.cache.cache_errors import CacheError
13280         try:
13281                 cache_db = settings.load_best_module("portdbapi.metadbmodule")(
13282                         portdir, "metadata/cache", portage.auxdbkeys[:], readonly=True)
13283         except CacheError, e:
13284                 writemsg_level("!!! Unable to instantiate cache: %s\n" % (e,),
13285                         level=logging.ERROR, noiselevel=-1)
13286                 return 1
13287
13288         ec_dir = os.path.join(portdir, "eclass")
13289         try:
13290                 ec_names = set(f[:-7] for f in os.listdir(ec_dir) \
13291                         if f.endswith(".eclass"))
13292         except OSError, e:
13293                 writemsg_level("!!! Unable to list eclasses: %s\n" % (e,),
13294                         level=logging.ERROR, noiselevel=-1)
13295                 return 1
13296
13297         args = [portage.const.BASH_BINARY, "-c",
13298                 "cd %s && git diff-index --name-only --diff-filter=M HEAD" % \
13299                 portage._shell_quote(portdir)]
13300         import subprocess
13301         proc = subprocess.Popen(args, stdout=subprocess.PIPE)
13302         modified_files = set(l.rstrip("\n") for l in proc.stdout)
13303         rval = proc.wait()
13304         if rval != os.EX_OK:
13305                 return rval
13306
13307         modified_eclasses = set(ec for ec in ec_names \
13308                 if os.path.join("eclass", ec + ".eclass") in modified_files)
13309
13310         updated_ec_mtimes = {}
13311
13312         for cpv in cache_db:
13313                 cpv_split = portage.catpkgsplit(cpv)
13314                 if cpv_split is None:
13315                         writemsg_level("!!! Invalid cache entry: %s\n" % (cpv,),
13316                                 level=logging.ERROR, noiselevel=-1)
13317                         continue
13318
13319                 cat, pn, ver, rev = cpv_split
13320                 cat, pf = portage.catsplit(cpv)
13321                 relative_eb_path = os.path.join(cat, pn, pf + ".ebuild")
13322                 if relative_eb_path in modified_files:
13323                         continue
13324
13325                 try:
13326                         cache_entry = cache_db[cpv]
13327                         eb_mtime = cache_entry.get("_mtime_")
13328                         ec_mtimes = cache_entry.get("_eclasses_")
13329                 except KeyError:
13330                         writemsg_level("!!! Missing cache entry: %s\n" % (cpv,),
13331                                 level=logging.ERROR, noiselevel=-1)
13332                         continue
13333                 except CacheError, e:
13334                         writemsg_level("!!! Unable to access cache entry: %s %s\n" % \
13335                                 (cpv, e), level=logging.ERROR, noiselevel=-1)
13336                         continue
13337
13338                 if eb_mtime is None:
13339                         writemsg_level("!!! Missing ebuild mtime: %s\n" % (cpv,),
13340                                 level=logging.ERROR, noiselevel=-1)
13341                         continue
13342
13343                 try:
13344                         eb_mtime = long(eb_mtime)
13345                 except ValueError:
13346                         writemsg_level("!!! Invalid ebuild mtime: %s %s\n" % \
13347                                 (cpv, eb_mtime), level=logging.ERROR, noiselevel=-1)
13348                         continue
13349
13350                 if ec_mtimes is None:
13351                         writemsg_level("!!! Missing eclass mtimes: %s\n" % (cpv,),
13352                                 level=logging.ERROR, noiselevel=-1)
13353                         continue
13354
13355                 if modified_eclasses.intersection(ec_mtimes):
13356                         continue
13357
13358                 missing_eclasses = set(ec_mtimes).difference(ec_names)
13359                 if missing_eclasses:
13360                         writemsg_level("!!! Non-existent eclass(es): %s %s\n" % \
13361                                 (cpv, sorted(missing_eclasses)), level=logging.ERROR,
13362                                 noiselevel=-1)
13363                         continue
13364
13365                 eb_path = os.path.join(portdir, relative_eb_path)
13366                 try:
13367                         current_eb_mtime = os.stat(eb_path)
13368                 except OSError:
13369                         writemsg_level("!!! Missing ebuild: %s\n" % \
13370                                 (cpv,), level=logging.ERROR, noiselevel=-1)
13371                         continue
13372
13373                 inconsistent = False
13374                 for ec, (ec_path, ec_mtime) in ec_mtimes.iteritems():
13375                         updated_mtime = updated_ec_mtimes.get(ec)
13376                         if updated_mtime is not None and updated_mtime != ec_mtime:
13377                                 writemsg_level("!!! Inconsistent eclass mtime: %s %s\n" % \
13378                                         (cpv, ec), level=logging.ERROR, noiselevel=-1)
13379                                 inconsistent = True
13380                                 break
13381
13382                 if inconsistent:
13383                         continue
13384
13385                 if current_eb_mtime != eb_mtime:
13386                         os.utime(eb_path, (eb_mtime, eb_mtime))
13387
13388                 for ec, (ec_path, ec_mtime) in ec_mtimes.iteritems():
13389                         if ec in updated_ec_mtimes:
13390                                 continue
13391                         ec_path = os.path.join(ec_dir, ec + ".eclass")
13392                         current_mtime = long(os.stat(ec_path).st_mtime)
13393                         if current_mtime != ec_mtime:
13394                                 os.utime(ec_path, (ec_mtime, ec_mtime))
13395                         updated_ec_mtimes[ec] = ec_mtime
13396
13397         return os.EX_OK
13398
13399 def action_metadata(settings, portdb, myopts, porttrees=None):
13400         if porttrees is None:
13401                 porttrees = portdb.porttrees
13402         portage.writemsg_stdout("\n>>> Updating Portage cache\n")
13403         old_umask = os.umask(0002)
13404         cachedir = os.path.normpath(settings.depcachedir)
13405         if cachedir in ["/",    "/bin", "/dev",  "/etc",  "/home",
13406                                         "/lib", "/opt", "/proc", "/root", "/sbin",
13407                                         "/sys", "/tmp", "/usr",  "/var"]:
13408                 print >> sys.stderr, "!!! PORTAGE_DEPCACHEDIR IS SET TO A PRIMARY " + \
13409                         "ROOT DIRECTORY ON YOUR SYSTEM."
13410                 print >> sys.stderr, \
13411                         "!!! This is ALMOST CERTAINLY NOT what you want: '%s'" % cachedir
13412                 sys.exit(73)
13413         if not os.path.exists(cachedir):
13414                 os.makedirs(cachedir)
13415
13416         auxdbkeys = [x for x in portage.auxdbkeys if not x.startswith("UNUSED_0")]
13417         auxdbkeys = tuple(auxdbkeys)
13418
13419         class TreeData(object):
13420                 __slots__ = ('dest_db', 'eclass_db', 'path', 'src_db', 'valid_nodes')
13421                 def __init__(self, dest_db, eclass_db, path, src_db):
13422                         self.dest_db = dest_db
13423                         self.eclass_db = eclass_db
13424                         self.path = path
13425                         self.src_db = src_db
13426                         self.valid_nodes = set()
13427
13428         porttrees_data = []
13429         for path in porttrees:
13430                 src_db = portdb._pregen_auxdb.get(path)
13431                 if src_db is None and \
13432                         os.path.isdir(os.path.join(path, 'metadata', 'cache')):
13433                         src_db = portdb.metadbmodule(
13434                                 path, 'metadata/cache', auxdbkeys, readonly=True)
13435                         try:
13436                                 src_db.ec = portdb._repo_info[path].eclass_db
13437                         except AttributeError:
13438                                 pass
13439
13440                 if src_db is not None:
13441                         porttrees_data.append(TreeData(portdb.auxdb[path],
13442                                 portdb._repo_info[path].eclass_db, path, src_db))
13443
13444         porttrees = [tree_data.path for tree_data in porttrees_data]
13445
13446         isatty = sys.stdout.isatty()
13447         quiet = not isatty or '--quiet' in myopts
13448         onProgress = None
13449         if not quiet:
13450                 progressBar = portage.output.TermProgressBar()
13451                 progressHandler = ProgressHandler()
13452                 onProgress = progressHandler.onProgress
13453                 def display():
13454                         progressBar.set(progressHandler.curval, progressHandler.maxval)
13455                 progressHandler.display = display
13456                 def sigwinch_handler(signum, frame):
13457                         lines, progressBar.term_columns = \
13458                                 portage.output.get_term_size()
13459                 signal.signal(signal.SIGWINCH, sigwinch_handler)
13460
13461         # Temporarily override portdb.porttrees so portdb.cp_all()
13462         # will only return the relevant subset.
13463         portdb_porttrees = portdb.porttrees
13464         portdb.porttrees = porttrees
13465         try:
13466                 cp_all = portdb.cp_all()
13467         finally:
13468                 portdb.porttrees = portdb_porttrees
13469
13470         curval = 0
13471         maxval = len(cp_all)
13472         if onProgress is not None:
13473                 onProgress(maxval, curval)
13474
13475         from portage.cache.util import quiet_mirroring
13476         from portage import eapi_is_supported, \
13477                 _validate_cache_for_unsupported_eapis
13478
13479         # TODO: Display error messages, but do not interfere with the progress bar.
13480         # Here's how:
13481         #  1) erase the progress bar
13482         #  2) show the error message
13483         #  3) redraw the progress bar on a new line
13484         noise = quiet_mirroring()
13485
13486         for cp in cp_all:
13487                 for tree_data in porttrees_data:
13488                         for cpv in portdb.cp_list(cp, mytree=tree_data.path):
13489                                 tree_data.valid_nodes.add(cpv)
13490                                 try:
13491                                         src = tree_data.src_db[cpv]
13492                                 except KeyError, e:
13493                                         noise.missing_entry(cpv)
13494                                         del e
13495                                         continue
13496                                 except CacheError, ce:
13497                                         noise.exception(cpv, ce)
13498                                         del ce
13499                                         continue
13500
13501                                 eapi = src.get('EAPI')
13502                                 if not eapi:
13503                                         eapi = '0'
13504                                 eapi = eapi.lstrip('-')
13505                                 eapi_supported = eapi_is_supported(eapi)
13506                                 if not eapi_supported:
13507                                         if not _validate_cache_for_unsupported_eapis:
13508                                                 noise.misc(cpv, "unable to validate " + \
13509                                                         "cache for EAPI='%s'" % eapi)
13510                                                 continue
13511
13512                                 dest = None
13513                                 try:
13514                                         dest = tree_data.dest_db[cpv]
13515                                 except (KeyError, CacheError):
13516                                         pass
13517
13518                                 for d in (src, dest):
13519                                         if d is not None and d.get('EAPI') in ('', '0'):
13520                                                 del d['EAPI']
13521
13522                                 if dest is not None:
13523                                         if not (dest['_mtime_'] == src['_mtime_'] and \
13524                                                 tree_data.eclass_db.is_eclass_data_valid(
13525                                                         dest['_eclasses_']) and \
13526                                                 set(dest['_eclasses_']) == set(src['_eclasses_'])):
13527                                                 dest = None
13528                                         else:
13529                                                 # We don't want to skip the write unless we're really
13530                                                 # sure that the existing cache is identical, so don't
13531                                                 # trust _mtime_ and _eclasses_ alone.
13532                                                 for k in set(chain(src, dest)).difference(
13533                                                         ('_mtime_', '_eclasses_')):
13534                                                         if dest.get(k, '') != src.get(k, ''):
13535                                                                 dest = None
13536                                                                 break
13537
13538                                 if dest is not None:
13539                                         # The existing data is valid and identical,
13540                                         # so there's no need to overwrite it.
13541                                         continue
13542
13543                                 try:
13544                                         inherited = src.get('INHERITED', '')
13545                                         eclasses = src.get('_eclasses_')
13546                                 except CacheError, ce:
13547                                         noise.exception(cpv, ce)
13548                                         del ce
13549                                         continue
13550
13551                                 if eclasses is not None:
13552                                         if not tree_data.eclass_db.is_eclass_data_valid(
13553                                                 src['_eclasses_']):
13554                                                 noise.eclass_stale(cpv)
13555                                                 continue
13556                                         inherited = eclasses
13557                                 else:
13558                                         inherited = inherited.split()
13559
13560                                 if tree_data.src_db.complete_eclass_entries and \
13561                                         eclasses is None:
13562                                         noise.corruption(cpv, "missing _eclasses_ field")
13563                                         continue
13564
13565                                 if inherited:
13566                                         # Even if _eclasses_ already exists, replace it with data from
13567                                         # eclass_cache, in order to insert local eclass paths.
13568                                         try:
13569                                                 eclasses = tree_data.eclass_db.get_eclass_data(inherited)
13570                                         except KeyError:
13571                                                 # INHERITED contains a non-existent eclass.
13572                                                 noise.eclass_stale(cpv)
13573                                                 continue
13574
13575                                         if eclasses is None:
13576                                                 noise.eclass_stale(cpv)
13577                                                 continue
13578                                         src['_eclasses_'] = eclasses
13579                                 else:
13580                                         src['_eclasses_'] = {}
13581
13582                                 if not eapi_supported:
13583                                         src = {
13584                                                 'EAPI'       : '-' + eapi,
13585                                                 '_mtime_'    : src['_mtime_'],
13586                                                 '_eclasses_' : src['_eclasses_'],
13587                                         }
13588
13589                                 try:
13590                                         tree_data.dest_db[cpv] = src
13591                                 except CacheError, ce:
13592                                         noise.exception(cpv, ce)
13593                                         del ce
13594
13595                 curval += 1
13596                 if onProgress is not None:
13597                         onProgress(maxval, curval)
13598
13599         if onProgress is not None:
13600                 onProgress(maxval, curval)
13601
13602         for tree_data in porttrees_data:
13603                 try:
13604                         dead_nodes = set(tree_data.dest_db.iterkeys())
13605                 except CacheError, e:
13606                         writemsg_level("Error listing cache entries for " + \
13607                                 "'%s': %s, continuing...\n" % (tree_data.path, e),
13608                                 level=logging.ERROR, noiselevel=-1)
13609                         del e
13610                 else:
13611                         dead_nodes.difference_update(tree_data.valid_nodes)
13612                         for cpv in dead_nodes:
13613                                 try:
13614                                         tree_data.dest_db[cpv]
13615                                 except (KeyError, CacheError):
13616                                         pass
13617
13618         if not quiet:
13619                 # make sure the final progress is displayed
13620                 progressHandler.display()
13621                 print
13622                 signal.signal(signal.SIGWINCH, signal.SIG_DFL)
13623
13624         sys.stdout.flush()
13625         os.umask(old_umask)
13626
13627 def action_regen(settings, portdb, max_jobs, max_load):
13628         xterm_titles = "notitles" not in settings.features
13629         emergelog(xterm_titles, " === regen")
13630         #regenerate cache entries
13631         portage.writemsg_stdout("Regenerating cache entries...\n")
13632         try:
13633                 os.close(sys.stdin.fileno())
13634         except SystemExit, e:
13635                 raise # Needed else can't exit
13636         except:
13637                 pass
13638         sys.stdout.flush()
13639
13640         regen = MetadataRegen(portdb, max_jobs=max_jobs, max_load=max_load)
13641         regen.run()
13642
13643         portage.writemsg_stdout("done!\n")
13644         return regen.returncode
13645
13646 def action_config(settings, trees, myopts, myfiles):
13647         if len(myfiles) != 1:
13648                 print red("!!! config can only take a single package atom at this time\n")
13649                 sys.exit(1)
13650         if not is_valid_package_atom(myfiles[0]):
13651                 portage.writemsg("!!! '%s' is not a valid package atom.\n" % myfiles[0],
13652                         noiselevel=-1)
13653                 portage.writemsg("!!! Please check ebuild(5) for full details.\n")
13654                 portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
13655                 sys.exit(1)
13656         print
13657         try:
13658                 pkgs = trees[settings["ROOT"]]["vartree"].dbapi.match(myfiles[0])
13659         except portage.exception.AmbiguousPackageName, e:
13660                 # Multiple matches thrown from cpv_expand
13661                 pkgs = e.args[0]
13662         if len(pkgs) == 0:
13663                 print "No packages found.\n"
13664                 sys.exit(0)
13665         elif len(pkgs) > 1:
13666                 if "--ask" in myopts:
13667                         options = []
13668                         print "Please select a package to configure:"
13669                         idx = 0
13670                         for pkg in pkgs:
13671                                 idx += 1
13672                                 options.append(str(idx))
13673                                 print options[-1]+") "+pkg
13674                         print "X) Cancel"
13675                         options.append("X")
13676                         idx = userquery("Selection?", options)
13677                         if idx == "X":
13678                                 sys.exit(0)
13679                         pkg = pkgs[int(idx)-1]
13680                 else:
13681                         print "The following packages available:"
13682                         for pkg in pkgs:
13683                                 print "* "+pkg
13684                         print "\nPlease use a specific atom or the --ask option."
13685                         sys.exit(1)
13686         else:
13687                 pkg = pkgs[0]
13688
13689         print
13690         if "--ask" in myopts:
13691                 if userquery("Ready to configure "+pkg+"?") == "No":
13692                         sys.exit(0)
13693         else:
13694                 print "Configuring pkg..."
13695         print
13696         ebuildpath = trees[settings["ROOT"]]["vartree"].dbapi.findname(pkg)
13697         mysettings = portage.config(clone=settings)
13698         vardb = trees[mysettings["ROOT"]]["vartree"].dbapi
13699         debug = mysettings.get("PORTAGE_DEBUG") == "1"
13700         retval = portage.doebuild(ebuildpath, "config", mysettings["ROOT"],
13701                 mysettings,
13702                 debug=(settings.get("PORTAGE_DEBUG", "") == 1), cleanup=True,
13703                 mydbapi=trees[settings["ROOT"]]["vartree"].dbapi, tree="vartree")
13704         if retval == os.EX_OK:
13705                 portage.doebuild(ebuildpath, "clean", mysettings["ROOT"],
13706                         mysettings, debug=debug, mydbapi=vardb, tree="vartree")
13707         print
13708
13709 def action_info(settings, trees, myopts, myfiles):
13710         print getportageversion(settings["PORTDIR"], settings["ROOT"],
13711                 settings.profile_path, settings["CHOST"],
13712                 trees[settings["ROOT"]]["vartree"].dbapi)
13713         header_width = 65
13714         header_title = "System Settings"
13715         if myfiles:
13716                 print header_width * "="
13717                 print header_title.rjust(int(header_width/2 + len(header_title)/2))
13718         print header_width * "="
13719         print "System uname: "+platform.platform(aliased=1)
13720
13721         lastSync = portage.grabfile(os.path.join(
13722                 settings["PORTDIR"], "metadata", "timestamp.chk"))
13723         print "Timestamp of tree:",
13724         if lastSync:
13725                 print lastSync[0]
13726         else:
13727                 print "Unknown"
13728
13729         output=commands.getstatusoutput("distcc --version")
13730         if not output[0]:
13731                 print str(output[1].split("\n",1)[0]),
13732                 if "distcc" in settings.features:
13733                         print "[enabled]"
13734                 else:
13735                         print "[disabled]"
13736
13737         output=commands.getstatusoutput("ccache -V")
13738         if not output[0]:
13739                 print str(output[1].split("\n",1)[0]),
13740                 if "ccache" in settings.features:
13741                         print "[enabled]"
13742                 else:
13743                         print "[disabled]"
13744
13745         myvars  = ["sys-devel/autoconf", "sys-devel/automake", "virtual/os-headers",
13746                    "sys-devel/binutils", "sys-devel/libtool",  "dev-lang/python"]
13747         myvars += portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_pkgs")
13748         myvars  = portage.util.unique_array(myvars)
13749         myvars.sort()
13750
13751         for x in myvars:
13752                 if portage.isvalidatom(x):
13753                         pkg_matches = trees["/"]["vartree"].dbapi.match(x)
13754                         pkg_matches = [portage.catpkgsplit(cpv)[1:] for cpv in pkg_matches]
13755                         pkg_matches.sort(key=cmp_sort_key(portage.pkgcmp))
13756                         pkgs = []
13757                         for pn, ver, rev in pkg_matches:
13758                                 if rev != "r0":
13759                                         pkgs.append(ver + "-" + rev)
13760                                 else:
13761                                         pkgs.append(ver)
13762                         if pkgs:
13763                                 pkgs = ", ".join(pkgs)
13764                                 print "%-20s %s" % (x+":", pkgs)
13765                 else:
13766                         print "%-20s %s" % (x+":", "[NOT VALID]")
13767
13768         libtool_vers = ",".join(trees["/"]["vartree"].dbapi.match("sys-devel/libtool"))
13769
13770         if "--verbose" in myopts:
13771                 myvars=settings.keys()
13772         else:
13773                 myvars = ['GENTOO_MIRRORS', 'CONFIG_PROTECT', 'CONFIG_PROTECT_MASK',
13774                           'PORTDIR', 'DISTDIR', 'PKGDIR', 'PORTAGE_TMPDIR',
13775                           'PORTDIR_OVERLAY', 'USE', 'CHOST', 'CFLAGS', 'CXXFLAGS',
13776                           'ACCEPT_KEYWORDS', 'SYNC', 'FEATURES', 'EMERGE_DEFAULT_OPTS']
13777
13778                 myvars.extend(portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_vars"))
13779
13780         myvars = portage.util.unique_array(myvars)
13781         use_expand = settings.get('USE_EXPAND', '').split()
13782         use_expand.sort()
13783         use_expand_hidden = set(
13784                 settings.get('USE_EXPAND_HIDDEN', '').upper().split())
13785         alphabetical_use = '--alphabetical' in myopts
13786         root_config = trees[settings["ROOT"]]['root_config']
13787         unset_vars = []
13788         myvars.sort()
13789         for x in myvars:
13790                 if x in settings:
13791                         if x != "USE":
13792                                 print '%s="%s"' % (x, settings[x])
13793                         else:
13794                                 use = set(settings["USE"].split())
13795                                 for varname in use_expand:
13796                                         flag_prefix = varname.lower() + "_"
13797                                         for f in list(use):
13798                                                 if f.startswith(flag_prefix):
13799                                                         use.remove(f)
13800                                 use = list(use)
13801                                 use.sort()
13802                                 print 'USE="%s"' % " ".join(use),
13803                                 for varname in use_expand:
13804                                         myval = settings.get(varname)
13805                                         if myval:
13806                                                 print '%s="%s"' % (varname, myval),
13807                                 print
13808                 else:
13809                         unset_vars.append(x)
13810         if unset_vars:
13811                 print "Unset:  "+", ".join(unset_vars)
13812         print
13813
13814         if "--debug" in myopts:
13815                 for x in dir(portage):
13816                         module = getattr(portage, x)
13817                         if "cvs_id_string" in dir(module):
13818                                 print "%s: %s" % (str(x), str(module.cvs_id_string))
13819
13820         # See if we can find any packages installed matching the strings
13821         # passed on the command line
13822         mypkgs = []
13823         vardb = trees[settings["ROOT"]]["vartree"].dbapi
13824         portdb = trees[settings["ROOT"]]["porttree"].dbapi
13825         for x in myfiles:
13826                 mypkgs.extend(vardb.match(x))
13827
13828         # If some packages were found...
13829         if mypkgs:
13830                 # Get our global settings (we only print stuff if it varies from
13831                 # the current config)
13832                 mydesiredvars = [ 'CHOST', 'CFLAGS', 'CXXFLAGS', 'LDFLAGS' ]
13833                 auxkeys = mydesiredvars + list(vardb._aux_cache_keys)
13834                 auxkeys.append('DEFINED_PHASES')
13835                 global_vals = {}
13836                 pkgsettings = portage.config(clone=settings)
13837
13838                 for myvar in mydesiredvars:
13839                         global_vals[myvar] = set(settings.get(myvar, "").split())
13840
13841                 # Loop through each package
13842                 # Only print settings if they differ from global settings
13843                 header_title = "Package Settings"
13844                 print header_width * "="
13845                 print header_title.rjust(int(header_width/2 + len(header_title)/2))
13846                 print header_width * "="
13847                 from portage.output import EOutput
13848                 out = EOutput()
13849                 for cpv in mypkgs:
13850                         # Get all package specific variables
13851                         metadata = dict(izip(auxkeys, vardb.aux_get(cpv, auxkeys)))
13852                         pkg = Package(built=True, cpv=cpv,
13853                                 installed=True, metadata=izip(Package.metadata_keys,
13854                                 (metadata.get(x, '') for x in Package.metadata_keys)),
13855                                 root_config=root_config, type_name='installed')
13856                         valuesmap = {}
13857                         for k in auxkeys:
13858                                 valuesmap[k] = set(metadata[k].split())
13859
13860                         diff_values = {}
13861                         for myvar in mydesiredvars:
13862                                 # If the package variable doesn't match the
13863                                 # current global variable, something has changed
13864                                 # so set diff_found so we know to print
13865                                 if valuesmap[myvar] != global_vals[myvar]:
13866                                         diff_values[myvar] = valuesmap[myvar]
13867
13868                         print "\n%s was built with the following:" % \
13869                                 colorize("INFORM", str(pkg.cpv))
13870
13871                         pkgsettings.setcpv(pkg)
13872                         forced_flags = set(chain(pkgsettings.useforce,
13873                                 pkgsettings.usemask))
13874                         use = set(pkg.use.enabled)
13875                         use.discard(pkgsettings.get('ARCH'))
13876                         use_expand_flags = set()
13877                         use_enabled = {}
13878                         use_disabled = {}
13879                         for varname in use_expand:
13880                                 flag_prefix = varname.lower() + "_"
13881                                 for f in use:
13882                                         if f.startswith(flag_prefix):
13883                                                 use_expand_flags.add(f)
13884                                                 use_enabled.setdefault(
13885                                                         varname.upper(), []).append(f[len(flag_prefix):])
13886
13887                                 for f in pkg.iuse.all:
13888                                         if f.startswith(flag_prefix):
13889                                                 use_expand_flags.add(f)
13890                                                 if f not in use:
13891                                                         use_disabled.setdefault(
13892                                                                 varname.upper(), []).append(f[len(flag_prefix):])
13893
13894                         var_order = set(use_enabled)
13895                         var_order.update(use_disabled)
13896                         var_order = sorted(var_order)
13897                         var_order.insert(0, 'USE')
13898                         use.difference_update(use_expand_flags)
13899                         use_enabled['USE'] = list(use)
13900                         use_disabled['USE'] = []
13901
13902                         for f in pkg.iuse.all:
13903                                 if f not in use and \
13904                                         f not in use_expand_flags:
13905                                         use_disabled['USE'].append(f)
13906
13907                         for varname in var_order:
13908                                 if varname in use_expand_hidden:
13909                                         continue
13910                                 flags = []
13911                                 for f in use_enabled.get(varname, []):
13912                                         flags.append(UseFlagDisplay(f, True, f in forced_flags))
13913                                 for f in use_disabled.get(varname, []):
13914                                         flags.append(UseFlagDisplay(f, False, f in forced_flags))
13915                                 if alphabetical_use:
13916                                         flags.sort(key=UseFlagDisplay.sort_combined)
13917                                 else:
13918                                         flags.sort(key=UseFlagDisplay.sort_separated)
13919                                 print '%s="%s"' % (varname, ' '.join(str(f) for f in flags)),
13920                         print
13921
13922                         # If a difference was found, print the info for
13923                         # this package.
13924                         if diff_values:
13925                                 # Print package info
13926                                 for myvar in mydesiredvars:
13927                                         if myvar in diff_values:
13928                                                 mylist = list(diff_values[myvar])
13929                                                 mylist.sort()
13930                                                 print "%s=\"%s\"" % (myvar, " ".join(mylist))
13931                         print
13932
13933                         if metadata['DEFINED_PHASES']:
13934                                 if 'info' not in metadata['DEFINED_PHASES'].split():
13935                                         continue
13936
13937                         print ">>> Attempting to run pkg_info() for '%s'" % pkg.cpv
13938                         ebuildpath = vardb.findname(pkg.cpv)
13939                         if not ebuildpath or not os.path.exists(ebuildpath):
13940                                 out.ewarn("No ebuild found for '%s'" % pkg.cpv)
13941                                 continue
13942                         portage.doebuild(ebuildpath, "info", pkgsettings["ROOT"],
13943                                 pkgsettings, debug=(settings.get("PORTAGE_DEBUG", "") == 1),
13944                                 mydbapi=trees[settings["ROOT"]]["vartree"].dbapi,
13945                                 tree="vartree")
13946
13947 def action_search(root_config, myopts, myfiles, spinner):
13948         if not myfiles:
13949                 print "emerge: no search terms provided."
13950         else:
13951                 searchinstance = search(root_config,
13952                         spinner, "--searchdesc" in myopts,
13953                         "--quiet" not in myopts, "--usepkg" in myopts,
13954                         "--usepkgonly" in myopts)
13955                 for mysearch in myfiles:
13956                         try:
13957                                 searchinstance.execute(mysearch)
13958                         except re.error, comment:
13959                                 print "\n!!! Regular expression error in \"%s\": %s" % ( mysearch, comment )
13960                                 sys.exit(1)
13961                         searchinstance.output()
13962
13963 def action_uninstall(settings, trees, ldpath_mtimes,
13964         opts, action, files, spinner):
13965
13966         # For backward compat, some actions do not require leading '='.
13967         ignore_missing_eq = action in ('clean', 'unmerge')
13968         root = settings['ROOT']
13969         vardb = trees[root]['vartree'].dbapi
13970         valid_atoms = []
13971         lookup_owners = []
13972
13973         # Ensure atoms are valid before calling unmerge().
13974         # For backward compat, leading '=' is not required.
13975         for x in files:
13976                 if is_valid_package_atom(x) or \
13977                         (ignore_missing_eq and is_valid_package_atom('=' + x)):
13978
13979                         try:
13980                                 valid_atoms.append(
13981                                         portage.dep_expand(x, mydb=vardb, settings=settings))
13982                         except portage.exception.AmbiguousPackageName, e:
13983                                 msg = "The short ebuild name \"" + x + \
13984                                         "\" is ambiguous.  Please specify " + \
13985                                         "one of the following " + \
13986                                         "fully-qualified ebuild names instead:"
13987                                 for line in textwrap.wrap(msg, 70):
13988                                         writemsg_level("!!! %s\n" % (line,),
13989                                                 level=logging.ERROR, noiselevel=-1)
13990                                 for i in e[0]:
13991                                         writemsg_level("    %s\n" % colorize("INFORM", i),
13992                                                 level=logging.ERROR, noiselevel=-1)
13993                                 writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
13994                                 return 1
13995
13996                 elif x.startswith(os.sep):
13997                         if not x.startswith(root):
13998                                 writemsg_level(("!!! '%s' does not start with" + \
13999                                         " $ROOT.\n") % x, level=logging.ERROR, noiselevel=-1)
14000                                 return 1
14001                         # Queue these up since it's most efficient to handle
14002                         # multiple files in a single iter_owners() call.
14003                         lookup_owners.append(x)
14004
14005                 else:
14006                         msg = []
14007                         msg.append("'%s' is not a valid package atom." % (x,))
14008                         msg.append("Please check ebuild(5) for full details.")
14009                         writemsg_level("".join("!!! %s\n" % line for line in msg),
14010                                 level=logging.ERROR, noiselevel=-1)
14011                         return 1
14012
14013         if lookup_owners:
14014                 relative_paths = []
14015                 search_for_multiple = False
14016                 if len(lookup_owners) > 1:
14017                         search_for_multiple = True
14018
14019                 for x in lookup_owners:
14020                         if not search_for_multiple and os.path.isdir(x):
14021                                 search_for_multiple = True
14022                         relative_paths.append(x[len(root):])
14023
14024                 owners = set()
14025                 for pkg, relative_path in \
14026                         vardb._owners.iter_owners(relative_paths):
14027                         owners.add(pkg.mycpv)
14028                         if not search_for_multiple:
14029                                 break
14030
14031                 if owners:
14032                         for cpv in owners:
14033                                 slot = vardb.aux_get(cpv, ['SLOT'])[0]
14034                                 if not slot:
14035                                         # portage now masks packages with missing slot, but it's
14036                                         # possible that one was installed by an older version
14037                                         atom = portage.cpv_getkey(cpv)
14038                                 else:
14039                                         atom = '%s:%s' % (portage.cpv_getkey(cpv), slot)
14040                                 valid_atoms.append(portage.dep.Atom(atom))
14041                 else:
14042                         writemsg_level(("!!! '%s' is not claimed " + \
14043                                 "by any package.\n") % lookup_owners[0],
14044                                 level=logging.WARNING, noiselevel=-1)
14045
14046         if files and not valid_atoms:
14047                 return 1
14048
14049         if action in ('clean', 'unmerge') or \
14050                 (action == 'prune' and "--nodeps" in opts):
14051                 # When given a list of atoms, unmerge them in the order given.
14052                 ordered = action == 'unmerge'
14053                 unmerge(trees[settings["ROOT"]]['root_config'], opts, action,
14054                         valid_atoms, ldpath_mtimes, ordered=ordered)
14055                 rval = os.EX_OK
14056         elif action == 'deselect':
14057                 rval = action_deselect(settings, trees, opts, valid_atoms)
14058         else:
14059                 rval = action_depclean(settings, trees, ldpath_mtimes,
14060                         opts, action, valid_atoms, spinner)
14061
14062         return rval
14063
14064 def action_deselect(settings, trees, opts, atoms):
14065         root_config = trees[settings['ROOT']]['root_config']
14066         world_set = root_config.sets['world']
14067         if not hasattr(world_set, 'update'):
14068                 writemsg_level("World set does not appear to be mutable.\n",
14069                         level=logging.ERROR, noiselevel=-1)
14070                 return 1
14071
14072         vardb = root_config.trees['vartree'].dbapi
14073         expanded_atoms = set(atoms)
14074         from portage.dep import Atom
14075         for atom in atoms:
14076                 for cpv in vardb.match(atom):
14077                         slot, = vardb.aux_get(cpv, ['SLOT'])
14078                         if not slot:
14079                                 slot = '0'
14080                         expanded_atoms.add(Atom('%s:%s' % (portage.cpv_getkey(cpv), slot)))
14081
14082         pretend = '--pretend' in opts
14083         locked = False
14084         if not pretend and hasattr(world_set, 'lock'):
14085                 world_set.lock()
14086                 locked = True
14087         try:
14088                 discard_atoms = set()
14089                 world_set.load()
14090                 for atom in world_set:
14091                         if not isinstance(atom, Atom):
14092                                 # nested set
14093                                 continue
14094                         for arg_atom in expanded_atoms:
14095                                 if arg_atom.intersects(atom) and \
14096                                         not (arg_atom.slot and not atom.slot):
14097                                         discard_atoms.add(atom)
14098                                         break
14099                 if discard_atoms:
14100                         for atom in sorted(discard_atoms):
14101                                 print ">>> Removing %s from \"world\" favorites file..." % \
14102                                         colorize("INFORM", str(atom))
14103
14104                         if '--ask' in opts:
14105                                 prompt = "Would you like to remove these " + \
14106                                         "packages from your world favorites?"
14107                                 if userquery(prompt) == 'No':
14108                                         return os.EX_OK
14109
14110                         remaining = set(world_set)
14111                         remaining.difference_update(discard_atoms)
14112                         if not pretend:
14113                                 world_set.replace(remaining)
14114                 else:
14115                         print ">>> No matching atoms found in \"world\" favorites file..."
14116         finally:
14117                 if locked:
14118                         world_set.unlock()
14119         return os.EX_OK
14120
14121 def action_depclean(settings, trees, ldpath_mtimes,
14122         myopts, action, myfiles, spinner):
14123         # Kill packages that aren't explicitly merged or are required as a
14124         # dependency of another package. World file is explicit.
14125
14126         # Global depclean or prune operations are not very safe when there are
14127         # missing dependencies since it's unknown how badly incomplete
14128         # the dependency graph is, and we might accidentally remove packages
14129         # that should have been pulled into the graph. On the other hand, it's
14130         # relatively safe to ignore missing deps when only asked to remove
14131         # specific packages.
14132         allow_missing_deps = len(myfiles) > 0
14133
14134         msg = []
14135         msg.append("Always study the list of packages to be cleaned for any obvious\n")
14136         msg.append("mistakes. Packages that are part of the world set will always\n")
14137         msg.append("be kept.  They can be manually added to this set with\n")
14138         msg.append(good("`emerge --noreplace <atom>`") + ".  Packages that are listed in\n")
14139         msg.append("package.provided (see portage(5)) will be removed by\n")
14140         msg.append("depclean, even if they are part of the world set.\n")
14141         msg.append("\n")
14142         msg.append("As a safety measure, depclean will not remove any packages\n")
14143         msg.append("unless *all* required dependencies have been resolved.  As a\n")
14144         msg.append("consequence, it is often necessary to run %s\n" % \
14145                 good("`emerge --update"))
14146         msg.append(good("--newuse --deep @system @world`") + \
14147                 " prior to depclean.\n")
14148
14149         if action == "depclean" and "--quiet" not in myopts and not myfiles:
14150                 portage.writemsg_stdout("\n")
14151                 for x in msg:
14152                         portage.writemsg_stdout(colorize("WARN", " * ") + x)
14153
14154         xterm_titles = "notitles" not in settings.features
14155         myroot = settings["ROOT"]
14156         root_config = trees[myroot]["root_config"]
14157         getSetAtoms = root_config.setconfig.getSetAtoms
14158         vardb = trees[myroot]["vartree"].dbapi
14159         deselect = myopts.get('--deselect') != 'n'
14160
14161         required_set_names = ("system", "world")
14162         required_sets = {}
14163         set_args = []
14164
14165         for s in required_set_names:
14166                 required_sets[s] = InternalPackageSet(
14167                         initial_atoms=getSetAtoms(s))
14168
14169         
14170         # When removing packages, use a temporary version of world
14171         # which excludes packages that are intended to be eligible for
14172         # removal.
14173         world_temp_set = required_sets["world"]
14174         system_set = required_sets["system"]
14175
14176         if not system_set or not world_temp_set:
14177
14178                 if not system_set:
14179                         writemsg_level("!!! You have no system list.\n",
14180                                 level=logging.ERROR, noiselevel=-1)
14181
14182                 if not world_temp_set:
14183                         writemsg_level("!!! You have no world file.\n",
14184                                         level=logging.WARNING, noiselevel=-1)
14185
14186                 writemsg_level("!!! Proceeding is likely to " + \
14187                         "break your installation.\n",
14188                         level=logging.WARNING, noiselevel=-1)
14189                 if "--pretend" not in myopts:
14190                         countdown(int(settings["EMERGE_WARNING_DELAY"]), ">>> Depclean")
14191
14192         if action == "depclean":
14193                 emergelog(xterm_titles, " >>> depclean")
14194
14195         import textwrap
14196         args_set = InternalPackageSet()
14197         if myfiles:
14198                 args_set.update(myfiles)
14199                 matched_packages = False
14200                 for x in args_set:
14201                         if vardb.match(x):
14202                                 matched_packages = True
14203                                 break
14204                 if not matched_packages:
14205                         writemsg_level(">>> No packages selected for removal by %s\n" % \
14206                                 action)
14207                         return
14208
14209         writemsg_level("\nCalculating dependencies  ")
14210         resolver_params = create_depgraph_params(myopts, "remove")
14211         resolver = depgraph(settings, trees, myopts, resolver_params, spinner)
14212         vardb = resolver.trees[myroot]["vartree"].dbapi
14213
14214         if action == "depclean":
14215
14216                 if args_set:
14217
14218                         if deselect:
14219                                 world_temp_set.clear()
14220
14221                         # Pull in everything that's installed but not matched
14222                         # by an argument atom since we don't want to clean any
14223                         # package if something depends on it.
14224                         for pkg in vardb:
14225                                 spinner.update()
14226
14227                                 try:
14228                                         if args_set.findAtomForPackage(pkg) is None:
14229                                                 world_temp_set.add("=" + pkg.cpv)
14230                                                 continue
14231                                 except portage.exception.InvalidDependString, e:
14232                                         show_invalid_depstring_notice(pkg,
14233                                                 pkg.metadata["PROVIDE"], str(e))
14234                                         del e
14235                                         world_temp_set.add("=" + pkg.cpv)
14236                                         continue
14237
14238         elif action == "prune":
14239
14240                 if deselect:
14241                         world_temp_set.clear()
14242
14243                 # Pull in everything that's installed since we don't
14244                 # to prune a package if something depends on it.
14245                 world_temp_set.update(vardb.cp_all())
14246
14247                 if not args_set:
14248
14249                         # Try to prune everything that's slotted.
14250                         for cp in vardb.cp_all():
14251                                 if len(vardb.cp_list(cp)) > 1:
14252                                         args_set.add(cp)
14253
14254                 # Remove atoms from world that match installed packages
14255                 # that are also matched by argument atoms, but do not remove
14256                 # them if they match the highest installed version.
14257                 for pkg in vardb:
14258                         spinner.update()
14259                         pkgs_for_cp = vardb.match_pkgs(pkg.cp)
14260                         if not pkgs_for_cp or pkg not in pkgs_for_cp:
14261                                 raise AssertionError("package expected in matches: " + \
14262                                         "cp = %s, cpv = %s matches = %s" % \
14263                                         (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
14264
14265                         highest_version = pkgs_for_cp[-1]
14266                         if pkg == highest_version:
14267                                 # pkg is the highest version
14268                                 world_temp_set.add("=" + pkg.cpv)
14269                                 continue
14270
14271                         if len(pkgs_for_cp) <= 1:
14272                                 raise AssertionError("more packages expected: " + \
14273                                         "cp = %s, cpv = %s matches = %s" % \
14274                                         (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
14275
14276                         try:
14277                                 if args_set.findAtomForPackage(pkg) is None:
14278                                         world_temp_set.add("=" + pkg.cpv)
14279                                         continue
14280                         except portage.exception.InvalidDependString, e:
14281                                 show_invalid_depstring_notice(pkg,
14282                                         pkg.metadata["PROVIDE"], str(e))
14283                                 del e
14284                                 world_temp_set.add("=" + pkg.cpv)
14285                                 continue
14286
14287         set_args = {}
14288         for s, package_set in required_sets.iteritems():
14289                 set_atom = SETPREFIX + s
14290                 set_arg = SetArg(arg=set_atom, set=package_set,
14291                         root_config=resolver.roots[myroot])
14292                 set_args[s] = set_arg
14293                 for atom in set_arg.set:
14294                         resolver._dep_stack.append(
14295                                 Dependency(atom=atom, root=myroot, parent=set_arg))
14296                         resolver.digraph.add(set_arg, None)
14297
14298         success = resolver._complete_graph()
14299         writemsg_level("\b\b... done!\n")
14300
14301         resolver.display_problems()
14302
14303         if not success:
14304                 return 1
14305
14306         def unresolved_deps():
14307
14308                 unresolvable = set()
14309                 for dep in resolver._initially_unsatisfied_deps:
14310                         if isinstance(dep.parent, Package) and \
14311                                 (dep.priority > UnmergeDepPriority.SOFT):
14312                                 unresolvable.add((dep.atom, dep.parent.cpv))
14313
14314                 if not unresolvable:
14315                         return False
14316
14317                 if unresolvable and not allow_missing_deps:
14318                         prefix = bad(" * ")
14319                         msg = []
14320                         msg.append("Dependencies could not be completely resolved due to")
14321                         msg.append("the following required packages not being installed:")
14322                         msg.append("")
14323                         for atom, parent in unresolvable:
14324                                 msg.append("  %s pulled in by:" % (atom,))
14325                                 msg.append("    %s" % (parent,))
14326                                 msg.append("")
14327                         msg.append("Have you forgotten to run " + \
14328                                 good("`emerge --update --newuse --deep @system @world`") + " prior")
14329                         msg.append(("to %s? It may be necessary to manually " + \
14330                                 "uninstall packages that no longer") % action)
14331                         msg.append("exist in the portage tree since " + \
14332                                 "it may not be possible to satisfy their")
14333                         msg.append("dependencies.  Also, be aware of " + \
14334                                 "the --with-bdeps option that is documented")
14335                         msg.append("in " + good("`man emerge`") + ".")
14336                         if action == "prune":
14337                                 msg.append("")
14338                                 msg.append("If you would like to ignore " + \
14339                                         "dependencies then use %s." % good("--nodeps"))
14340                         writemsg_level("".join("%s%s\n" % (prefix, line) for line in msg),
14341                                 level=logging.ERROR, noiselevel=-1)
14342                         return True
14343                 return False
14344
14345         if unresolved_deps():
14346                 return 1
14347
14348         graph = resolver.digraph.copy()
14349         required_pkgs_total = 0
14350         for node in graph:
14351                 if isinstance(node, Package):
14352                         required_pkgs_total += 1
14353
14354         def show_parents(child_node):
14355                 parent_nodes = graph.parent_nodes(child_node)
14356                 if not parent_nodes:
14357                         # With --prune, the highest version can be pulled in without any
14358                         # real parent since all installed packages are pulled in.  In that
14359                         # case there's nothing to show here.
14360                         return
14361                 parent_strs = []
14362                 for node in parent_nodes:
14363                         parent_strs.append(str(getattr(node, "cpv", node)))
14364                 parent_strs.sort()
14365                 msg = []
14366                 msg.append("  %s pulled in by:\n" % (child_node.cpv,))
14367                 for parent_str in parent_strs:
14368                         msg.append("    %s\n" % (parent_str,))
14369                 msg.append("\n")
14370                 portage.writemsg_stdout("".join(msg), noiselevel=-1)
14371
14372         def cmp_pkg_cpv(pkg1, pkg2):
14373                 """Sort Package instances by cpv."""
14374                 if pkg1.cpv > pkg2.cpv:
14375                         return 1
14376                 elif pkg1.cpv == pkg2.cpv:
14377                         return 0
14378                 else:
14379                         return -1
14380
14381         def create_cleanlist():
14382                 pkgs_to_remove = []
14383
14384                 if action == "depclean":
14385                         if args_set:
14386
14387                                 for pkg in sorted(vardb, key=cmp_sort_key(cmp_pkg_cpv)):
14388                                         arg_atom = None
14389                                         try:
14390                                                 arg_atom = args_set.findAtomForPackage(pkg)
14391                                         except portage.exception.InvalidDependString:
14392                                                 # this error has already been displayed by now
14393                                                 continue
14394
14395                                         if arg_atom:
14396                                                 if pkg not in graph:
14397                                                         pkgs_to_remove.append(pkg)
14398                                                 elif "--verbose" in myopts:
14399                                                         show_parents(pkg)
14400
14401                         else:
14402                                 for pkg in sorted(vardb, key=cmp_sort_key(cmp_pkg_cpv)):
14403                                         if pkg not in graph:
14404                                                 pkgs_to_remove.append(pkg)
14405                                         elif "--verbose" in myopts:
14406                                                 show_parents(pkg)
14407
14408                 elif action == "prune":
14409                         # Prune really uses all installed instead of world. It's not
14410                         # a real reverse dependency so don't display it as such.
14411                         graph.remove(set_args["world"])
14412
14413                         for atom in args_set:
14414                                 for pkg in vardb.match_pkgs(atom):
14415                                         if pkg not in graph:
14416                                                 pkgs_to_remove.append(pkg)
14417                                         elif "--verbose" in myopts:
14418                                                 show_parents(pkg)
14419
14420                 if not pkgs_to_remove:
14421                         writemsg_level(
14422                                 ">>> No packages selected for removal by %s\n" % action)
14423                         if "--verbose" not in myopts:
14424                                 writemsg_level(
14425                                         ">>> To see reverse dependencies, use %s\n" % \
14426                                                 good("--verbose"))
14427                         if action == "prune":
14428                                 writemsg_level(
14429                                         ">>> To ignore dependencies, use %s\n" % \
14430                                                 good("--nodeps"))
14431
14432                 return pkgs_to_remove
14433
14434         cleanlist = create_cleanlist()
14435
14436         if len(cleanlist):
14437                 clean_set = set(cleanlist)
14438
14439                 # Check if any of these package are the sole providers of libraries
14440                 # with consumers that have not been selected for removal. If so, these
14441                 # packages and any dependencies need to be added to the graph.
14442                 real_vardb = trees[myroot]["vartree"].dbapi
14443                 linkmap = real_vardb.linkmap
14444                 liblist = linkmap.listLibraryObjects()
14445                 consumer_cache = {}
14446                 provider_cache = {}
14447                 soname_cache = {}
14448                 consumer_map = {}
14449
14450                 writemsg_level(">>> Checking for lib consumers...\n")
14451
14452                 for pkg in cleanlist:
14453                         pkg_dblink = real_vardb._dblink(pkg.cpv)
14454                         provided_libs = set()
14455
14456                         for lib in liblist:
14457                                 if pkg_dblink.isowner(lib, myroot):
14458                                         provided_libs.add(lib)
14459
14460                         if not provided_libs:
14461                                 continue
14462
14463                         consumers = {}
14464                         for lib in provided_libs:
14465                                 lib_consumers = consumer_cache.get(lib)
14466                                 if lib_consumers is None:
14467                                         lib_consumers = linkmap.findConsumers(lib)
14468                                         consumer_cache[lib] = lib_consumers
14469                                 if lib_consumers:
14470                                         consumers[lib] = lib_consumers
14471
14472                         if not consumers:
14473                                 continue
14474
14475                         for lib, lib_consumers in consumers.items():
14476                                 for consumer_file in list(lib_consumers):
14477                                         if pkg_dblink.isowner(consumer_file, myroot):
14478                                                 lib_consumers.remove(consumer_file)
14479                                 if not lib_consumers:
14480                                         del consumers[lib]
14481
14482                         if not consumers:
14483                                 continue
14484
14485                         for lib, lib_consumers in consumers.iteritems():
14486
14487                                 soname = soname_cache.get(lib)
14488                                 if soname is None:
14489                                         soname = linkmap.getSoname(lib)
14490                                         soname_cache[lib] = soname
14491
14492                                 consumer_providers = []
14493                                 for lib_consumer in lib_consumers:
14494                                         providers = provider_cache.get(lib)
14495                                         if providers is None:
14496                                                 providers = linkmap.findProviders(lib_consumer)
14497                                                 provider_cache[lib_consumer] = providers
14498                                         if soname not in providers:
14499                                                 # Why does this happen?
14500                                                 continue
14501                                         consumer_providers.append(
14502                                                 (lib_consumer, providers[soname]))
14503
14504                                 consumers[lib] = consumer_providers
14505
14506                         consumer_map[pkg] = consumers
14507
14508                 if consumer_map:
14509
14510                         search_files = set()
14511                         for consumers in consumer_map.itervalues():
14512                                 for lib, consumer_providers in consumers.iteritems():
14513                                         for lib_consumer, providers in consumer_providers:
14514                                                 search_files.add(lib_consumer)
14515                                                 search_files.update(providers)
14516
14517                         writemsg_level(">>> Assigning files to packages...\n")
14518                         file_owners = real_vardb._owners.getFileOwnerMap(search_files)
14519
14520                         for pkg, consumers in consumer_map.items():
14521                                 for lib, consumer_providers in consumers.items():
14522                                         lib_consumers = set()
14523
14524                                         for lib_consumer, providers in consumer_providers:
14525                                                 owner_set = file_owners.get(lib_consumer)
14526                                                 provider_dblinks = set()
14527                                                 provider_pkgs = set()
14528
14529                                                 if len(providers) > 1:
14530                                                         for provider in providers:
14531                                                                 provider_set = file_owners.get(provider)
14532                                                                 if provider_set is not None:
14533                                                                         provider_dblinks.update(provider_set)
14534
14535                                                 if len(provider_dblinks) > 1:
14536                                                         for provider_dblink in provider_dblinks:
14537                                                                 pkg_key = ("installed", myroot,
14538                                                                         provider_dblink.mycpv, "nomerge")
14539                                                                 if pkg_key not in clean_set:
14540                                                                         provider_pkgs.add(vardb.get(pkg_key))
14541
14542                                                 if provider_pkgs:
14543                                                         continue
14544
14545                                                 if owner_set is not None:
14546                                                         lib_consumers.update(owner_set)
14547
14548                                         for consumer_dblink in list(lib_consumers):
14549                                                 if ("installed", myroot, consumer_dblink.mycpv,
14550                                                         "nomerge") in clean_set:
14551                                                         lib_consumers.remove(consumer_dblink)
14552                                                         continue
14553
14554                                         if lib_consumers:
14555                                                 consumers[lib] = lib_consumers
14556                                         else:
14557                                                 del consumers[lib]
14558                                 if not consumers:
14559                                         del consumer_map[pkg]
14560
14561                 if consumer_map:
14562                         # TODO: Implement a package set for rebuilding consumer packages.
14563
14564                         msg = "In order to avoid breakage of link level " + \
14565                                 "dependencies, one or more packages will not be removed. " + \
14566                                 "This can be solved by rebuilding " + \
14567                                 "the packages that pulled them in."
14568
14569                         prefix = bad(" * ")
14570                         from textwrap import wrap
14571                         writemsg_level("".join(prefix + "%s\n" % line for \
14572                                 line in wrap(msg, 70)), level=logging.WARNING, noiselevel=-1)
14573
14574                         msg = []
14575                         for pkg, consumers in consumer_map.iteritems():
14576                                 unique_consumers = set(chain(*consumers.values()))
14577                                 unique_consumers = sorted(consumer.mycpv \
14578                                         for consumer in unique_consumers)
14579                                 msg.append("")
14580                                 msg.append("  %s pulled in by:" % (pkg.cpv,))
14581                                 for consumer in unique_consumers:
14582                                         msg.append("    %s" % (consumer,))
14583                         msg.append("")
14584                         writemsg_level("".join(prefix + "%s\n" % line for line in msg),
14585                                 level=logging.WARNING, noiselevel=-1)
14586
14587                         # Add lib providers to the graph as children of lib consumers,
14588                         # and also add any dependencies pulled in by the provider.
14589                         writemsg_level(">>> Adding lib providers to graph...\n")
14590
14591                         for pkg, consumers in consumer_map.iteritems():
14592                                 for consumer_dblink in set(chain(*consumers.values())):
14593                                         consumer_pkg = vardb.get(("installed", myroot,
14594                                                 consumer_dblink.mycpv, "nomerge"))
14595                                         if not resolver._add_pkg(pkg,
14596                                                 Dependency(parent=consumer_pkg,
14597                                                 priority=UnmergeDepPriority(runtime=True),
14598                                                 root=pkg.root)):
14599                                                 resolver.display_problems()
14600                                                 return 1
14601
14602                         writemsg_level("\nCalculating dependencies  ")
14603                         success = resolver._complete_graph()
14604                         writemsg_level("\b\b... done!\n")
14605                         resolver.display_problems()
14606                         if not success:
14607                                 return 1
14608                         if unresolved_deps():
14609                                 return 1
14610
14611                         graph = resolver.digraph.copy()
14612                         required_pkgs_total = 0
14613                         for node in graph:
14614                                 if isinstance(node, Package):
14615                                         required_pkgs_total += 1
14616                         cleanlist = create_cleanlist()
14617                         if not cleanlist:
14618                                 return 0
14619                         clean_set = set(cleanlist)
14620
14621                 # Use a topological sort to create an unmerge order such that
14622                 # each package is unmerged before it's dependencies. This is
14623                 # necessary to avoid breaking things that may need to run
14624                 # during pkg_prerm or pkg_postrm phases.
14625
14626                 # Create a new graph to account for dependencies between the
14627                 # packages being unmerged.
14628                 graph = digraph()
14629                 del cleanlist[:]
14630
14631                 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
14632                 runtime = UnmergeDepPriority(runtime=True)
14633                 runtime_post = UnmergeDepPriority(runtime_post=True)
14634                 buildtime = UnmergeDepPriority(buildtime=True)
14635                 priority_map = {
14636                         "RDEPEND": runtime,
14637                         "PDEPEND": runtime_post,
14638                         "DEPEND": buildtime,
14639                 }
14640
14641                 for node in clean_set:
14642                         graph.add(node, None)
14643                         mydeps = []
14644                         node_use = node.metadata["USE"].split()
14645                         for dep_type in dep_keys:
14646                                 depstr = node.metadata[dep_type]
14647                                 if not depstr:
14648                                         continue
14649                                 try:
14650                                         portage.dep._dep_check_strict = False
14651                                         success, atoms = portage.dep_check(depstr, None, settings,
14652                                                 myuse=node_use, trees=resolver._graph_trees,
14653                                                 myroot=myroot)
14654                                 finally:
14655                                         portage.dep._dep_check_strict = True
14656                                 if not success:
14657                                         # Ignore invalid deps of packages that will
14658                                         # be uninstalled anyway.
14659                                         continue
14660
14661                                 priority = priority_map[dep_type]
14662                                 for atom in atoms:
14663                                         if not isinstance(atom, portage.dep.Atom):
14664                                                 # Ignore invalid atoms returned from dep_check().
14665                                                 continue
14666                                         if atom.blocker:
14667                                                 continue
14668                                         matches = vardb.match_pkgs(atom)
14669                                         if not matches:
14670                                                 continue
14671                                         for child_node in matches:
14672                                                 if child_node in clean_set:
14673                                                         graph.add(child_node, node, priority=priority)
14674
14675                 ordered = True
14676                 if len(graph.order) == len(graph.root_nodes()):
14677                         # If there are no dependencies between packages
14678                         # let unmerge() group them by cat/pn.
14679                         ordered = False
14680                         cleanlist = [pkg.cpv for pkg in graph.order]
14681                 else:
14682                         # Order nodes from lowest to highest overall reference count for
14683                         # optimal root node selection.
14684                         node_refcounts = {}
14685                         for node in graph.order:
14686                                 node_refcounts[node] = len(graph.parent_nodes(node))
14687                         def cmp_reference_count(node1, node2):
14688                                 return node_refcounts[node1] - node_refcounts[node2]
14689                         graph.order.sort(key=cmp_sort_key(cmp_reference_count))
14690         
14691                         ignore_priority_range = [None]
14692                         ignore_priority_range.extend(
14693                                 xrange(UnmergeDepPriority.MIN, UnmergeDepPriority.MAX + 1))
14694                         while not graph.empty():
14695                                 for ignore_priority in ignore_priority_range:
14696                                         nodes = graph.root_nodes(ignore_priority=ignore_priority)
14697                                         if nodes:
14698                                                 break
14699                                 if not nodes:
14700                                         raise AssertionError("no root nodes")
14701                                 if ignore_priority is not None:
14702                                         # Some deps have been dropped due to circular dependencies,
14703                                         # so only pop one node in order do minimize the number that
14704                                         # are dropped.
14705                                         del nodes[1:]
14706                                 for node in nodes:
14707                                         graph.remove(node)
14708                                         cleanlist.append(node.cpv)
14709
14710                 unmerge(root_config, myopts, "unmerge", cleanlist,
14711                         ldpath_mtimes, ordered=ordered)
14712
14713         if action == "prune":
14714                 return
14715
14716         if not cleanlist and "--quiet" in myopts:
14717                 return
14718
14719         print "Packages installed:   "+str(len(vardb.cpv_all()))
14720         print "Packages in world:    " + \
14721                 str(len(root_config.sets["world"].getAtoms()))
14722         print "Packages in system:   " + \
14723                 str(len(root_config.sets["system"].getAtoms()))
14724         print "Required packages:    "+str(required_pkgs_total)
14725         if "--pretend" in myopts:
14726                 print "Number to remove:     "+str(len(cleanlist))
14727         else:
14728                 print "Number removed:       "+str(len(cleanlist))
14729
14730 def resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
14731         """
14732         Construct a depgraph for the given resume list. This will raise
14733         PackageNotFound or depgraph.UnsatisfiedResumeDep when necessary.
14734         @rtype: tuple
14735         @returns: (success, depgraph, dropped_tasks)
14736         """
14737         skip_masked = True
14738         skip_unsatisfied = True
14739         mergelist = mtimedb["resume"]["mergelist"]
14740         dropped_tasks = set()
14741         while True:
14742                 mydepgraph = depgraph(settings, trees,
14743                         myopts, myparams, spinner)
14744                 try:
14745                         success = mydepgraph.loadResumeCommand(mtimedb["resume"],
14746                                 skip_masked=skip_masked)
14747                 except depgraph.UnsatisfiedResumeDep, e:
14748                         if not skip_unsatisfied:
14749                                 raise
14750
14751                         graph = mydepgraph.digraph
14752                         unsatisfied_parents = dict((dep.parent, dep.parent) \
14753                                 for dep in e.value)
14754                         traversed_nodes = set()
14755                         unsatisfied_stack = list(unsatisfied_parents)
14756                         while unsatisfied_stack:
14757                                 pkg = unsatisfied_stack.pop()
14758                                 if pkg in traversed_nodes:
14759                                         continue
14760                                 traversed_nodes.add(pkg)
14761
14762                                 # If this package was pulled in by a parent
14763                                 # package scheduled for merge, removing this
14764                                 # package may cause the the parent package's
14765                                 # dependency to become unsatisfied.
14766                                 for parent_node in graph.parent_nodes(pkg):
14767                                         if not isinstance(parent_node, Package) \
14768                                                 or parent_node.operation not in ("merge", "nomerge"):
14769                                                 continue
14770                                         unsatisfied = \
14771                                                 graph.child_nodes(parent_node,
14772                                                 ignore_priority=DepPrioritySatisfiedRange.ignore_soft)
14773                                         if pkg in unsatisfied:
14774                                                 unsatisfied_parents[parent_node] = parent_node
14775                                                 unsatisfied_stack.append(parent_node)
14776
14777                         pruned_mergelist = []
14778                         for x in mergelist:
14779                                 if isinstance(x, list) and \
14780                                         tuple(x) not in unsatisfied_parents:
14781                                         pruned_mergelist.append(x)
14782
14783                         # If the mergelist doesn't shrink then this loop is infinite.
14784                         if len(pruned_mergelist) == len(mergelist):
14785                                 # This happens if a package can't be dropped because
14786                                 # it's already installed, but it has unsatisfied PDEPEND.
14787                                 raise
14788                         mergelist[:] = pruned_mergelist
14789
14790                         # Exclude installed packages that have been removed from the graph due
14791                         # to failure to build/install runtime dependencies after the dependent
14792                         # package has already been installed.
14793                         dropped_tasks.update(pkg for pkg in \
14794                                 unsatisfied_parents if pkg.operation != "nomerge")
14795                         mydepgraph.break_refs(unsatisfied_parents)
14796
14797                         del e, graph, traversed_nodes, \
14798                                 unsatisfied_parents, unsatisfied_stack
14799                         continue
14800                 else:
14801                         break
14802         return (success, mydepgraph, dropped_tasks)
14803
14804 def action_build(settings, trees, mtimedb,
14805         myopts, myaction, myfiles, spinner):
14806
14807         # validate the state of the resume data
14808         # so that we can make assumptions later.
14809         for k in ("resume", "resume_backup"):
14810                 if k not in mtimedb:
14811                         continue
14812                 resume_data = mtimedb[k]
14813                 if not isinstance(resume_data, dict):
14814                         del mtimedb[k]
14815                         continue
14816                 mergelist = resume_data.get("mergelist")
14817                 if not isinstance(mergelist, list):
14818                         del mtimedb[k]
14819                         continue
14820                 for x in mergelist:
14821                         if not (isinstance(x, list) and len(x) == 4):
14822                                 continue
14823                         pkg_type, pkg_root, pkg_key, pkg_action = x
14824                         if pkg_root not in trees:
14825                                 # Current $ROOT setting differs,
14826                                 # so the list must be stale.
14827                                 mergelist = None
14828                                 break
14829                 if not mergelist:
14830                         del mtimedb[k]
14831                         continue
14832                 resume_opts = resume_data.get("myopts")
14833                 if not isinstance(resume_opts, (dict, list)):
14834                         del mtimedb[k]
14835                         continue
14836                 favorites = resume_data.get("favorites")
14837                 if not isinstance(favorites, list):
14838                         del mtimedb[k]
14839                         continue
14840
14841         resume = False
14842         if "--resume" in myopts and \
14843                 ("resume" in mtimedb or
14844                 "resume_backup" in mtimedb):
14845                 resume = True
14846                 if "resume" not in mtimedb:
14847                         mtimedb["resume"] = mtimedb["resume_backup"]
14848                         del mtimedb["resume_backup"]
14849                         mtimedb.commit()
14850                 # "myopts" is a list for backward compatibility.
14851                 resume_opts = mtimedb["resume"].get("myopts", [])
14852                 if isinstance(resume_opts, list):
14853                         resume_opts = dict((k,True) for k in resume_opts)
14854                 for opt in ("--ask", "--color", "--skipfirst", "--tree"):
14855                         resume_opts.pop(opt, None)
14856
14857                 # Current options always override resume_opts.
14858                 resume_opts.update(myopts)
14859                 myopts.clear()
14860                 myopts.update(resume_opts)
14861
14862                 if "--debug" in myopts:
14863                         writemsg_level("myopts %s\n" % (myopts,))
14864
14865                 # Adjust config according to options of the command being resumed.
14866                 for myroot in trees:
14867                         mysettings =  trees[myroot]["vartree"].settings
14868                         mysettings.unlock()
14869                         adjust_config(myopts, mysettings)
14870                         mysettings.lock()
14871                         del myroot, mysettings
14872
14873         ldpath_mtimes = mtimedb["ldpath"]
14874         favorites=[]
14875         merge_count = 0
14876         buildpkgonly = "--buildpkgonly" in myopts
14877         pretend = "--pretend" in myopts
14878         fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
14879         ask = "--ask" in myopts
14880         nodeps = "--nodeps" in myopts
14881         oneshot = "--oneshot" in myopts or "--onlydeps" in myopts
14882         tree = "--tree" in myopts
14883         if nodeps and tree:
14884                 tree = False
14885                 del myopts["--tree"]
14886                 portage.writemsg(colorize("WARN", " * ") + \
14887                         "--tree is broken with --nodeps. Disabling...\n")
14888         debug = "--debug" in myopts
14889         verbose = "--verbose" in myopts
14890         quiet = "--quiet" in myopts
14891         if pretend or fetchonly:
14892                 # make the mtimedb readonly
14893                 mtimedb.filename = None
14894         if '--digest' in myopts or 'digest' in settings.features:
14895                 if '--digest' in myopts:
14896                         msg = "The --digest option"
14897                 else:
14898                         msg = "The FEATURES=digest setting"
14899
14900                 msg += " can prevent corruption from being" + \
14901                         " noticed. The `repoman manifest` command is the preferred" + \
14902                         " way to generate manifests and it is capable of doing an" + \
14903                         " entire repository or category at once."
14904                 prefix = bad(" * ")
14905                 writemsg(prefix + "\n")
14906                 from textwrap import wrap
14907                 for line in wrap(msg, 72):
14908                         writemsg("%s%s\n" % (prefix, line))
14909                 writemsg(prefix + "\n")
14910
14911         if "--quiet" not in myopts and \
14912                 ("--pretend" in myopts or "--ask" in myopts or \
14913                 "--tree" in myopts or "--verbose" in myopts):
14914                 action = ""
14915                 if "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
14916                         action = "fetched"
14917                 elif "--buildpkgonly" in myopts:
14918                         action = "built"
14919                 else:
14920                         action = "merged"
14921                 if "--tree" in myopts and action != "fetched": # Tree doesn't work with fetching
14922                         print
14923                         print darkgreen("These are the packages that would be %s, in reverse order:") % action
14924                         print
14925                 else:
14926                         print
14927                         print darkgreen("These are the packages that would be %s, in order:") % action
14928                         print
14929
14930         show_spinner = "--quiet" not in myopts and "--nodeps" not in myopts
14931         if not show_spinner:
14932                 spinner.update = spinner.update_quiet
14933
14934         if resume:
14935                 favorites = mtimedb["resume"].get("favorites")
14936                 if not isinstance(favorites, list):
14937                         favorites = []
14938
14939                 if show_spinner:
14940                         print "Calculating dependencies  ",
14941                 myparams = create_depgraph_params(myopts, myaction)
14942
14943                 resume_data = mtimedb["resume"]
14944                 mergelist = resume_data["mergelist"]
14945                 if mergelist and "--skipfirst" in myopts:
14946                         for i, task in enumerate(mergelist):
14947                                 if isinstance(task, list) and \
14948                                         task and task[-1] == "merge":
14949                                         del mergelist[i]
14950                                         break
14951
14952                 success = False
14953                 mydepgraph = None
14954                 try:
14955                         success, mydepgraph, dropped_tasks = resume_depgraph(
14956                                 settings, trees, mtimedb, myopts, myparams, spinner)
14957                 except (portage.exception.PackageNotFound,
14958                         depgraph.UnsatisfiedResumeDep), e:
14959                         if isinstance(e, depgraph.UnsatisfiedResumeDep):
14960                                 mydepgraph = e.depgraph
14961                         if show_spinner:
14962                                 print
14963                         from textwrap import wrap
14964                         from portage.output import EOutput
14965                         out = EOutput()
14966
14967                         resume_data = mtimedb["resume"]
14968                         mergelist = resume_data.get("mergelist")
14969                         if not isinstance(mergelist, list):
14970                                 mergelist = []
14971                         if mergelist and debug or (verbose and not quiet):
14972                                 out.eerror("Invalid resume list:")
14973                                 out.eerror("")
14974                                 indent = "  "
14975                                 for task in mergelist:
14976                                         if isinstance(task, list):
14977                                                 out.eerror(indent + str(tuple(task)))
14978                                 out.eerror("")
14979
14980                         if isinstance(e, depgraph.UnsatisfiedResumeDep):
14981                                 out.eerror("One or more packages are either masked or " + \
14982                                         "have missing dependencies:")
14983                                 out.eerror("")
14984                                 indent = "  "
14985                                 for dep in e.value:
14986                                         if dep.atom is None:
14987                                                 out.eerror(indent + "Masked package:")
14988                                                 out.eerror(2 * indent + str(dep.parent))
14989                                                 out.eerror("")
14990                                         else:
14991                                                 out.eerror(indent + str(dep.atom) + " pulled in by:")
14992                                                 out.eerror(2 * indent + str(dep.parent))
14993                                                 out.eerror("")
14994                                 msg = "The resume list contains packages " + \
14995                                         "that are either masked or have " + \
14996                                         "unsatisfied dependencies. " + \
14997                                         "Please restart/continue " + \
14998                                         "the operation manually, or use --skipfirst " + \
14999                                         "to skip the first package in the list and " + \
15000                                         "any other packages that may be " + \
15001                                         "masked or have missing dependencies."
15002                                 for line in wrap(msg, 72):
15003                                         out.eerror(line)
15004                         elif isinstance(e, portage.exception.PackageNotFound):
15005                                 out.eerror("An expected package is " + \
15006                                         "not available: %s" % str(e))
15007                                 out.eerror("")
15008                                 msg = "The resume list contains one or more " + \
15009                                         "packages that are no longer " + \
15010                                         "available. Please restart/continue " + \
15011                                         "the operation manually."
15012                                 for line in wrap(msg, 72):
15013                                         out.eerror(line)
15014                 else:
15015                         if show_spinner:
15016                                 print "\b\b... done!"
15017
15018                 if success:
15019                         if dropped_tasks:
15020                                 portage.writemsg("!!! One or more packages have been " + \
15021                                         "dropped due to\n" + \
15022                                         "!!! masking or unsatisfied dependencies:\n\n",
15023                                         noiselevel=-1)
15024                                 for task in dropped_tasks:
15025                                         portage.writemsg("  " + str(task) + "\n", noiselevel=-1)
15026                                 portage.writemsg("\n", noiselevel=-1)
15027                         del dropped_tasks
15028                 else:
15029                         if mydepgraph is not None:
15030                                 mydepgraph.display_problems()
15031                         if not (ask or pretend):
15032                                 # delete the current list and also the backup
15033                                 # since it's probably stale too.
15034                                 for k in ("resume", "resume_backup"):
15035                                         mtimedb.pop(k, None)
15036                                 mtimedb.commit()
15037
15038                         return 1
15039         else:
15040                 if ("--resume" in myopts):
15041                         print darkgreen("emerge: It seems we have nothing to resume...")
15042                         return os.EX_OK
15043
15044                 myparams = create_depgraph_params(myopts, myaction)
15045                 if "--quiet" not in myopts and "--nodeps" not in myopts:
15046                         print "Calculating dependencies  ",
15047                         sys.stdout.flush()
15048                 mydepgraph = depgraph(settings, trees, myopts, myparams, spinner)
15049                 try:
15050                         retval, favorites = mydepgraph.select_files(myfiles)
15051                 except portage.exception.PackageNotFound, e:
15052                         portage.writemsg("\n!!! %s\n" % str(e), noiselevel=-1)
15053                         return 1
15054                 except portage.exception.PackageSetNotFound, e:
15055                         root_config = trees[settings["ROOT"]]["root_config"]
15056                         display_missing_pkg_set(root_config, e.value)
15057                         return 1
15058                 if show_spinner:
15059                         print "\b\b... done!"
15060                 if not retval:
15061                         mydepgraph.display_problems()
15062                         return 1
15063
15064         if "--pretend" not in myopts and \
15065                 ("--ask" in myopts or "--tree" in myopts or \
15066                 "--verbose" in myopts) and \
15067                 not ("--quiet" in myopts and "--ask" not in myopts):
15068                 if "--resume" in myopts:
15069                         mymergelist = mydepgraph.altlist()
15070                         if len(mymergelist) == 0:
15071                                 print colorize("INFORM", "emerge: It seems we have nothing to resume...")
15072                                 return os.EX_OK
15073                         favorites = mtimedb["resume"]["favorites"]
15074                         retval = mydepgraph.display(
15075                                 mydepgraph.altlist(reversed=tree),
15076                                 favorites=favorites)
15077                         mydepgraph.display_problems()
15078                         if retval != os.EX_OK:
15079                                 return retval
15080                         prompt="Would you like to resume merging these packages?"
15081                 else:
15082                         retval = mydepgraph.display(
15083                                 mydepgraph.altlist(reversed=("--tree" in myopts)),
15084                                 favorites=favorites)
15085                         mydepgraph.display_problems()
15086                         if retval != os.EX_OK:
15087                                 return retval
15088                         mergecount=0
15089                         for x in mydepgraph.altlist():
15090                                 if isinstance(x, Package) and x.operation == "merge":
15091                                         mergecount += 1
15092
15093                         if mergecount==0:
15094                                 sets = trees[settings["ROOT"]]["root_config"].sets
15095                                 world_candidates = None
15096                                 if "--noreplace" in myopts and \
15097                                         not oneshot and favorites:
15098                                         # Sets that are not world candidates are filtered
15099                                         # out here since the favorites list needs to be
15100                                         # complete for depgraph.loadResumeCommand() to
15101                                         # operate correctly.
15102                                         world_candidates = [x for x in favorites \
15103                                                 if not (x.startswith(SETPREFIX) and \
15104                                                 not sets[x[1:]].world_candidate)]
15105                                 if "--noreplace" in myopts and \
15106                                         not oneshot and world_candidates:
15107                                         print
15108                                         for x in world_candidates:
15109                                                 print " %s %s" % (good("*"), x)
15110                                         prompt="Would you like to add these packages to your world favorites?"
15111                                 elif settings["AUTOCLEAN"] and "yes"==settings["AUTOCLEAN"]:
15112                                         prompt="Nothing to merge; would you like to auto-clean packages?"
15113                                 else:
15114                                         print
15115                                         print "Nothing to merge; quitting."
15116                                         print
15117                                         return os.EX_OK
15118                         elif "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
15119                                 prompt="Would you like to fetch the source files for these packages?"
15120                         else:
15121                                 prompt="Would you like to merge these packages?"
15122                 print
15123                 if "--ask" in myopts and userquery(prompt) == "No":
15124                         print
15125                         print "Quitting."
15126                         print
15127                         return os.EX_OK
15128                 # Don't ask again (e.g. when auto-cleaning packages after merge)
15129                 myopts.pop("--ask", None)
15130
15131         if ("--pretend" in myopts) and not ("--fetchonly" in myopts or "--fetch-all-uri" in myopts):
15132                 if ("--resume" in myopts):
15133                         mymergelist = mydepgraph.altlist()
15134                         if len(mymergelist) == 0:
15135                                 print colorize("INFORM", "emerge: It seems we have nothing to resume...")
15136                                 return os.EX_OK
15137                         favorites = mtimedb["resume"]["favorites"]
15138                         retval = mydepgraph.display(
15139                                 mydepgraph.altlist(reversed=tree),
15140                                 favorites=favorites)
15141                         mydepgraph.display_problems()
15142                         if retval != os.EX_OK:
15143                                 return retval
15144                 else:
15145                         retval = mydepgraph.display(
15146                                 mydepgraph.altlist(reversed=("--tree" in myopts)),
15147                                 favorites=favorites)
15148                         mydepgraph.display_problems()
15149                         if retval != os.EX_OK:
15150                                 return retval
15151                         if "--buildpkgonly" in myopts:
15152                                 graph_copy = mydepgraph.digraph.clone()
15153                                 removed_nodes = set()
15154                                 for node in graph_copy:
15155                                         if not isinstance(node, Package) or \
15156                                                 node.operation == "nomerge":
15157                                                 removed_nodes.add(node)
15158                                 graph_copy.difference_update(removed_nodes)
15159                                 if not graph_copy.hasallzeros(ignore_priority = \
15160                                         DepPrioritySatisfiedRange.ignore_medium):
15161                                         print "\n!!! --buildpkgonly requires all dependencies to be merged."
15162                                         print "!!! You have to merge the dependencies before you can build this package.\n"
15163                                         return 1
15164         else:
15165                 if "--buildpkgonly" in myopts:
15166                         graph_copy = mydepgraph.digraph.clone()
15167                         removed_nodes = set()
15168                         for node in graph_copy:
15169                                 if not isinstance(node, Package) or \
15170                                         node.operation == "nomerge":
15171                                         removed_nodes.add(node)
15172                         graph_copy.difference_update(removed_nodes)
15173                         if not graph_copy.hasallzeros(ignore_priority = \
15174                                 DepPrioritySatisfiedRange.ignore_medium):
15175                                 print "\n!!! --buildpkgonly requires all dependencies to be merged."
15176                                 print "!!! Cannot merge requested packages. Merge deps and try again.\n"
15177                                 return 1
15178
15179                 if ("--resume" in myopts):
15180                         favorites=mtimedb["resume"]["favorites"]
15181                         mymergelist = mydepgraph.altlist()
15182                         mydepgraph.break_refs(mymergelist)
15183                         mergetask = Scheduler(settings, trees, mtimedb, myopts,
15184                                 spinner, mymergelist, favorites, mydepgraph.schedulerGraph())
15185                         del mydepgraph, mymergelist
15186                         clear_caches(trees)
15187
15188                         retval = mergetask.merge()
15189                         merge_count = mergetask.curval
15190                 else:
15191                         if "resume" in mtimedb and \
15192                         "mergelist" in mtimedb["resume"] and \
15193                         len(mtimedb["resume"]["mergelist"]) > 1:
15194                                 mtimedb["resume_backup"] = mtimedb["resume"]
15195                                 del mtimedb["resume"]
15196                                 mtimedb.commit()
15197                         mtimedb["resume"]={}
15198                         # Stored as a dict starting with portage-2.1.6_rc1, and supported
15199                         # by >=portage-2.1.3_rc8. Versions <portage-2.1.3_rc8 only support
15200                         # a list type for options.
15201                         mtimedb["resume"]["myopts"] = myopts.copy()
15202
15203                         # Convert Atom instances to plain str.
15204                         mtimedb["resume"]["favorites"] = [str(x) for x in favorites]
15205
15206                         pkglist = mydepgraph.altlist()
15207                         mydepgraph.saveNomergeFavorites()
15208                         mydepgraph.break_refs(pkglist)
15209                         mergetask = Scheduler(settings, trees, mtimedb, myopts,
15210                                 spinner, pkglist, favorites, mydepgraph.schedulerGraph())
15211                         del mydepgraph, pkglist
15212                         clear_caches(trees)
15213
15214                         retval = mergetask.merge()
15215                         merge_count = mergetask.curval
15216
15217                 if retval == os.EX_OK and not (buildpkgonly or fetchonly or pretend):
15218                         if "yes" == settings.get("AUTOCLEAN"):
15219                                 portage.writemsg_stdout(">>> Auto-cleaning packages...\n")
15220                                 unmerge(trees[settings["ROOT"]]["root_config"],
15221                                         myopts, "clean", [],
15222                                         ldpath_mtimes, autoclean=1)
15223                         else:
15224                                 portage.writemsg_stdout(colorize("WARN", "WARNING:")
15225                                         + " AUTOCLEAN is disabled.  This can cause serious"
15226                                         + " problems due to overlapping packages.\n")
15227                         trees[settings["ROOT"]]["vartree"].dbapi.plib_registry.pruneNonExisting()
15228
15229                 return retval
15230
15231 def multiple_actions(action1, action2):
15232         sys.stderr.write("\n!!! Multiple actions requested... Please choose one only.\n")
15233         sys.stderr.write("!!! '%s' or '%s'\n\n" % (action1, action2))
15234         sys.exit(1)
15235
15236 def insert_optional_args(args):
15237         """
15238         Parse optional arguments and insert a value if one has
15239         not been provided. This is done before feeding the args
15240         to the optparse parser since that parser does not support
15241         this feature natively.
15242         """
15243
15244         new_args = []
15245         jobs_opts = ("-j", "--jobs")
15246         default_arg_opts = {
15247                 '--deselect'   : ('n',),
15248                 '--root-deps'  : ('rdeps',),
15249         }
15250         arg_stack = args[:]
15251         arg_stack.reverse()
15252         while arg_stack:
15253                 arg = arg_stack.pop()
15254
15255                 default_arg_choices = default_arg_opts.get(arg)
15256                 if default_arg_choices is not None:
15257                         new_args.append(arg)
15258                         if arg_stack and arg_stack[-1] in default_arg_choices:
15259                                 new_args.append(arg_stack.pop())
15260                         else:
15261                                 # insert default argument
15262                                 new_args.append('True')
15263                         continue
15264
15265                 short_job_opt = bool("j" in arg and arg[:1] == "-" and arg[:2] != "--")
15266                 if not (short_job_opt or arg in jobs_opts):
15267                         new_args.append(arg)
15268                         continue
15269
15270                 # Insert an empty placeholder in order to
15271                 # satisfy the requirements of optparse.
15272
15273                 new_args.append("--jobs")
15274                 job_count = None
15275                 saved_opts = None
15276                 if short_job_opt and len(arg) > 2:
15277                         if arg[:2] == "-j":
15278                                 try:
15279                                         job_count = int(arg[2:])
15280                                 except ValueError:
15281                                         saved_opts = arg[2:]
15282                         else:
15283                                 job_count = "True"
15284                                 saved_opts = arg[1:].replace("j", "")
15285
15286                 if job_count is None and arg_stack:
15287                         try:
15288                                 job_count = int(arg_stack[-1])
15289                         except ValueError:
15290                                 pass
15291                         else:
15292                                 # Discard the job count from the stack
15293                                 # since we're consuming it here.
15294                                 arg_stack.pop()
15295
15296                 if job_count is None:
15297                         # unlimited number of jobs
15298                         new_args.append("True")
15299                 else:
15300                         new_args.append(str(job_count))
15301
15302                 if saved_opts is not None:
15303                         new_args.append("-" + saved_opts)
15304
15305         return new_args
15306
15307 def parse_opts(tmpcmdline, silent=False):
15308         myaction=None
15309         myopts = {}
15310         myfiles=[]
15311
15312         global actions, options, shortmapping
15313
15314         longopt_aliases = {"--cols":"--columns", "--skip-first":"--skipfirst"}
15315         argument_options = {
15316                 "--config-root": {
15317                         "help":"specify the location for portage configuration files",
15318                         "action":"store"
15319                 },
15320                 "--color": {
15321                         "help":"enable or disable color output",
15322                         "type":"choice",
15323                         "choices":("y", "n")
15324                 },
15325
15326                 "--deselect": {
15327                         "help"    : "remove atoms from the world file",
15328                         "type"    : "choice",
15329                         "choices" : ("True", "n")
15330                 },
15331
15332                 "--jobs": {
15333
15334                         "help"   : "Specifies the number of packages to build " + \
15335                                 "simultaneously.",
15336
15337                         "action" : "store"
15338                 },
15339
15340                 "--load-average": {
15341
15342                         "help"   :"Specifies that no new builds should be started " + \
15343                                 "if there are other builds running and the load average " + \
15344                                 "is at least LOAD (a floating-point number).",
15345
15346                         "action" : "store"
15347                 },
15348
15349                 "--with-bdeps": {
15350                         "help":"include unnecessary build time dependencies",
15351                         "type":"choice",
15352                         "choices":("y", "n")
15353                 },
15354                 "--reinstall": {
15355                         "help":"specify conditions to trigger package reinstallation",
15356                         "type":"choice",
15357                         "choices":["changed-use"]
15358                 },
15359                 "--root": {
15360                  "help"   : "specify the target root filesystem for merging packages",
15361                  "action" : "store"
15362                 },
15363
15364                 "--root-deps": {
15365                         "help"    : "modify interpretation of depedencies",
15366                         "type"    : "choice",
15367                         "choices" :("True", "rdeps")
15368                 },
15369         }
15370
15371         from optparse import OptionParser
15372         parser = OptionParser()
15373         if parser.has_option("--help"):
15374                 parser.remove_option("--help")
15375
15376         for action_opt in actions:
15377                 parser.add_option("--" + action_opt, action="store_true",
15378                         dest=action_opt.replace("-", "_"), default=False)
15379         for myopt in options:
15380                 parser.add_option(myopt, action="store_true",
15381                         dest=myopt.lstrip("--").replace("-", "_"), default=False)
15382         for shortopt, longopt in shortmapping.iteritems():
15383                 parser.add_option("-" + shortopt, action="store_true",
15384                         dest=longopt.lstrip("--").replace("-", "_"), default=False)
15385         for myalias, myopt in longopt_aliases.iteritems():
15386                 parser.add_option(myalias, action="store_true",
15387                         dest=myopt.lstrip("--").replace("-", "_"), default=False)
15388
15389         for myopt, kwargs in argument_options.iteritems():
15390                 parser.add_option(myopt,
15391                         dest=myopt.lstrip("--").replace("-", "_"), **kwargs)
15392
15393         tmpcmdline = insert_optional_args(tmpcmdline)
15394
15395         myoptions, myargs = parser.parse_args(args=tmpcmdline)
15396
15397         if myoptions.deselect == "True":
15398                 myoptions.deselect = True
15399
15400         if myoptions.root_deps == "True":
15401                 myoptions.root_deps = True
15402
15403         if myoptions.jobs:
15404                 jobs = None
15405                 if myoptions.jobs == "True":
15406                         jobs = True
15407                 else:
15408                         try:
15409                                 jobs = int(myoptions.jobs)
15410                         except ValueError:
15411                                 jobs = -1
15412
15413                 if jobs is not True and \
15414                         jobs < 1:
15415                         jobs = None
15416                         if not silent:
15417                                 writemsg("!!! Invalid --jobs parameter: '%s'\n" % \
15418                                         (myoptions.jobs,), noiselevel=-1)
15419
15420                 myoptions.jobs = jobs
15421
15422         if myoptions.load_average:
15423                 try:
15424                         load_average = float(myoptions.load_average)
15425                 except ValueError:
15426                         load_average = 0.0
15427
15428                 if load_average <= 0.0:
15429                         load_average = None
15430                         if not silent:
15431                                 writemsg("!!! Invalid --load-average parameter: '%s'\n" % \
15432                                         (myoptions.load_average,), noiselevel=-1)
15433
15434                 myoptions.load_average = load_average
15435
15436         for myopt in options:
15437                 v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"))
15438                 if v:
15439                         myopts[myopt] = True
15440
15441         for myopt in argument_options:
15442                 v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"), None)
15443                 if v is not None:
15444                         myopts[myopt] = v
15445
15446         if myoptions.searchdesc:
15447                 myoptions.search = True
15448
15449         for action_opt in actions:
15450                 v = getattr(myoptions, action_opt.replace("-", "_"))
15451                 if v:
15452                         if myaction:
15453                                 multiple_actions(myaction, action_opt)
15454                                 sys.exit(1)
15455                         myaction = action_opt
15456
15457         if myaction is None and myoptions.deselect is True:
15458                 myaction = 'deselect'
15459
15460         myfiles += myargs
15461
15462         return myaction, myopts, myfiles
15463
15464 def validate_ebuild_environment(trees):
15465         for myroot in trees:
15466                 settings = trees[myroot]["vartree"].settings
15467                 settings.validate()
15468
15469 def clear_caches(trees):
15470         for d in trees.itervalues():
15471                 d["porttree"].dbapi.melt()
15472                 d["porttree"].dbapi._aux_cache.clear()
15473                 d["bintree"].dbapi._aux_cache.clear()
15474                 d["bintree"].dbapi._clear_cache()
15475                 d["vartree"].dbapi.linkmap._clear_cache()
15476         portage.dircache.clear()
15477         gc.collect()
15478
15479 def load_emerge_config(trees=None):
15480         kwargs = {}
15481         for k, envvar in (("config_root", "PORTAGE_CONFIGROOT"), ("target_root", "ROOT")):
15482                 v = os.environ.get(envvar, None)
15483                 if v and v.strip():
15484                         kwargs[k] = v
15485         trees = portage.create_trees(trees=trees, **kwargs)
15486
15487         for root, root_trees in trees.iteritems():
15488                 settings = root_trees["vartree"].settings
15489                 setconfig = load_default_config(settings, root_trees)
15490                 root_trees["root_config"] = RootConfig(settings, root_trees, setconfig)
15491
15492         settings = trees["/"]["vartree"].settings
15493
15494         for myroot in trees:
15495                 if myroot != "/":
15496                         settings = trees[myroot]["vartree"].settings
15497                         break
15498
15499         mtimedbfile = os.path.join("/", portage.CACHE_PATH.lstrip(os.path.sep), "mtimedb")
15500         mtimedb = portage.MtimeDB(mtimedbfile)
15501         
15502         return settings, trees, mtimedb
15503
15504 def adjust_config(myopts, settings):
15505         """Make emerge specific adjustments to the config."""
15506
15507         # To enhance usability, make some vars case insensitive by forcing them to
15508         # lower case.
15509         for myvar in ("AUTOCLEAN", "NOCOLOR"):
15510                 if myvar in settings:
15511                         settings[myvar] = settings[myvar].lower()
15512                         settings.backup_changes(myvar)
15513         del myvar
15514
15515         # Kill noauto as it will break merges otherwise.
15516         if "noauto" in settings.features:
15517                 settings.features.remove('noauto')
15518                 settings['FEATURES'] = ' '.join(sorted(settings.features))
15519                 settings.backup_changes("FEATURES")
15520
15521         CLEAN_DELAY = 5
15522         try:
15523                 CLEAN_DELAY = int(settings.get("CLEAN_DELAY", str(CLEAN_DELAY)))
15524         except ValueError, e:
15525                 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
15526                 portage.writemsg("!!! Unable to parse integer: CLEAN_DELAY='%s'\n" % \
15527                         settings["CLEAN_DELAY"], noiselevel=-1)
15528         settings["CLEAN_DELAY"] = str(CLEAN_DELAY)
15529         settings.backup_changes("CLEAN_DELAY")
15530
15531         EMERGE_WARNING_DELAY = 10
15532         try:
15533                 EMERGE_WARNING_DELAY = int(settings.get(
15534                         "EMERGE_WARNING_DELAY", str(EMERGE_WARNING_DELAY)))
15535         except ValueError, e:
15536                 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
15537                 portage.writemsg("!!! Unable to parse integer: EMERGE_WARNING_DELAY='%s'\n" % \
15538                         settings["EMERGE_WARNING_DELAY"], noiselevel=-1)
15539         settings["EMERGE_WARNING_DELAY"] = str(EMERGE_WARNING_DELAY)
15540         settings.backup_changes("EMERGE_WARNING_DELAY")
15541
15542         if "--quiet" in myopts:
15543                 settings["PORTAGE_QUIET"]="1"
15544                 settings.backup_changes("PORTAGE_QUIET")
15545
15546         if "--verbose" in myopts:
15547                 settings["PORTAGE_VERBOSE"] = "1"
15548                 settings.backup_changes("PORTAGE_VERBOSE")
15549
15550         # Set so that configs will be merged regardless of remembered status
15551         if ("--noconfmem" in myopts):
15552                 settings["NOCONFMEM"]="1"
15553                 settings.backup_changes("NOCONFMEM")
15554
15555         # Set various debug markers... They should be merged somehow.
15556         PORTAGE_DEBUG = 0
15557         try:
15558                 PORTAGE_DEBUG = int(settings.get("PORTAGE_DEBUG", str(PORTAGE_DEBUG)))
15559                 if PORTAGE_DEBUG not in (0, 1):
15560                         portage.writemsg("!!! Invalid value: PORTAGE_DEBUG='%i'\n" % \
15561                                 PORTAGE_DEBUG, noiselevel=-1)
15562                         portage.writemsg("!!! PORTAGE_DEBUG must be either 0 or 1\n",
15563                                 noiselevel=-1)
15564                         PORTAGE_DEBUG = 0
15565         except ValueError, e:
15566                 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
15567                 portage.writemsg("!!! Unable to parse integer: PORTAGE_DEBUG='%s'\n" %\
15568                         settings["PORTAGE_DEBUG"], noiselevel=-1)
15569                 del e
15570         if "--debug" in myopts:
15571                 PORTAGE_DEBUG = 1
15572         settings["PORTAGE_DEBUG"] = str(PORTAGE_DEBUG)
15573         settings.backup_changes("PORTAGE_DEBUG")
15574
15575         if settings.get("NOCOLOR") not in ("yes","true"):
15576                 portage.output.havecolor = 1
15577
15578         """The explicit --color < y | n > option overrides the NOCOLOR environment
15579         variable and stdout auto-detection."""
15580         if "--color" in myopts:
15581                 if "y" == myopts["--color"]:
15582                         portage.output.havecolor = 1
15583                         settings["NOCOLOR"] = "false"
15584                 else:
15585                         portage.output.havecolor = 0
15586                         settings["NOCOLOR"] = "true"
15587                 settings.backup_changes("NOCOLOR")
15588         elif not sys.stdout.isatty() and settings.get("NOCOLOR") != "no":
15589                 portage.output.havecolor = 0
15590                 settings["NOCOLOR"] = "true"
15591                 settings.backup_changes("NOCOLOR")
15592
15593 def apply_priorities(settings):
15594         ionice(settings)
15595         nice(settings)
15596
15597 def nice(settings):
15598         try:
15599                 os.nice(int(settings.get("PORTAGE_NICENESS", "0")))
15600         except (OSError, ValueError), e:
15601                 out = portage.output.EOutput()
15602                 out.eerror("Failed to change nice value to '%s'" % \
15603                         settings["PORTAGE_NICENESS"])
15604                 out.eerror("%s\n" % str(e))
15605
15606 def ionice(settings):
15607
15608         ionice_cmd = settings.get("PORTAGE_IONICE_COMMAND")
15609         if ionice_cmd:
15610                 ionice_cmd = shlex.split(ionice_cmd)
15611         if not ionice_cmd:
15612                 return
15613
15614         from portage.util import varexpand
15615         variables = {"PID" : str(os.getpid())}
15616         cmd = [varexpand(x, mydict=variables) for x in ionice_cmd]
15617
15618         try:
15619                 rval = portage.process.spawn(cmd, env=os.environ)
15620         except portage.exception.CommandNotFound:
15621                 # The OS kernel probably doesn't support ionice,
15622                 # so return silently.
15623                 return
15624
15625         if rval != os.EX_OK:
15626                 out = portage.output.EOutput()
15627                 out.eerror("PORTAGE_IONICE_COMMAND returned %d" % (rval,))
15628                 out.eerror("See the make.conf(5) man page for PORTAGE_IONICE_COMMAND usage instructions.")
15629
15630 def display_missing_pkg_set(root_config, set_name):
15631
15632         msg = []
15633         msg.append(("emerge: There are no sets to satisfy '%s'. " + \
15634                 "The following sets exist:") % \
15635                 colorize("INFORM", set_name))
15636         msg.append("")
15637
15638         for s in sorted(root_config.sets):
15639                 msg.append("    %s" % s)
15640         msg.append("")
15641
15642         writemsg_level("".join("%s\n" % l for l in msg),
15643                 level=logging.ERROR, noiselevel=-1)
15644
15645 def expand_set_arguments(myfiles, myaction, root_config):
15646         retval = os.EX_OK
15647         setconfig = root_config.setconfig
15648
15649         sets = setconfig.getSets()
15650
15651         # In order to know exactly which atoms/sets should be added to the
15652         # world file, the depgraph performs set expansion later. It will get
15653         # confused about where the atoms came from if it's not allowed to
15654         # expand them itself.
15655         do_not_expand = (None, )
15656         newargs = []
15657         for a in myfiles:
15658                 if a in ("system", "world"):
15659                         newargs.append(SETPREFIX+a)
15660                 else:
15661                         newargs.append(a)
15662         myfiles = newargs
15663         del newargs
15664         newargs = []
15665
15666         # separators for set arguments
15667         ARG_START = "{"
15668         ARG_END = "}"
15669
15670         # WARNING: all operators must be of equal length
15671         IS_OPERATOR = "/@"
15672         DIFF_OPERATOR = "-@"
15673         UNION_OPERATOR = "+@"
15674         
15675         for i in range(0, len(myfiles)):
15676                 if myfiles[i].startswith(SETPREFIX):
15677                         start = 0
15678                         end = 0
15679                         x = myfiles[i][len(SETPREFIX):]
15680                         newset = ""
15681                         while x:
15682                                 start = x.find(ARG_START)
15683                                 end = x.find(ARG_END)
15684                                 if start > 0 and start < end:
15685                                         namepart = x[:start]
15686                                         argpart = x[start+1:end]
15687                                 
15688                                         # TODO: implement proper quoting
15689                                         args = argpart.split(",")
15690                                         options = {}
15691                                         for a in args:
15692                                                 if "=" in a:
15693                                                         k, v  = a.split("=", 1)
15694                                                         options[k] = v
15695                                                 else:
15696                                                         options[a] = "True"
15697                                         setconfig.update(namepart, options)
15698                                         newset += (x[:start-len(namepart)]+namepart)
15699                                         x = x[end+len(ARG_END):]
15700                                 else:
15701                                         newset += x
15702                                         x = ""
15703                         myfiles[i] = SETPREFIX+newset
15704                                 
15705         sets = setconfig.getSets()
15706
15707         # display errors that occured while loading the SetConfig instance
15708         for e in setconfig.errors:
15709                 print colorize("BAD", "Error during set creation: %s" % e)
15710         
15711         # emerge relies on the existance of sets with names "world" and "system"
15712         required_sets = ("world", "system")
15713         missing_sets = []
15714
15715         for s in required_sets:
15716                 if s not in sets:
15717                         missing_sets.append(s)
15718         if missing_sets:
15719                 if len(missing_sets) > 2:
15720                         missing_sets_str = ", ".join('"%s"' % s for s in missing_sets[:-1])
15721                         missing_sets_str += ', and "%s"' % missing_sets[-1]
15722                 elif len(missing_sets) == 2:
15723                         missing_sets_str = '"%s" and "%s"' % tuple(missing_sets)
15724                 else:
15725                         missing_sets_str = '"%s"' % missing_sets[-1]
15726                 msg = ["emerge: incomplete set configuration, " + \
15727                         "missing set(s): %s" % missing_sets_str]
15728                 if sets:
15729                         msg.append("        sets defined: %s" % ", ".join(sets))
15730                 msg.append("        This usually means that '%s'" % \
15731                         (os.path.join(portage.const.GLOBAL_CONFIG_PATH, "sets.conf"),))
15732                 msg.append("        is missing or corrupt.")
15733                 for line in msg:
15734                         writemsg_level(line + "\n", level=logging.ERROR, noiselevel=-1)
15735                 return (None, 1)
15736         unmerge_actions = ("unmerge", "prune", "clean", "depclean")
15737
15738         for a in myfiles:
15739                 if a.startswith(SETPREFIX):
15740                         # support simple set operations (intersection, difference and union)
15741                         # on the commandline. Expressions are evaluated strictly left-to-right
15742                         if IS_OPERATOR in a or DIFF_OPERATOR in a or UNION_OPERATOR in a:
15743                                 expression = a[len(SETPREFIX):]
15744                                 expr_sets = []
15745                                 expr_ops = []
15746                                 while IS_OPERATOR in expression or DIFF_OPERATOR in expression or UNION_OPERATOR in expression:
15747                                         is_pos = expression.rfind(IS_OPERATOR)
15748                                         diff_pos = expression.rfind(DIFF_OPERATOR)
15749                                         union_pos = expression.rfind(UNION_OPERATOR)
15750                                         op_pos = max(is_pos, diff_pos, union_pos)
15751                                         s1 = expression[:op_pos]
15752                                         s2 = expression[op_pos+len(IS_OPERATOR):]
15753                                         op = expression[op_pos:op_pos+len(IS_OPERATOR)]
15754                                         if not s2 in sets:
15755                                                 display_missing_pkg_set(root_config, s2)
15756                                                 return (None, 1)
15757                                         expr_sets.insert(0, s2)
15758                                         expr_ops.insert(0, op)
15759                                         expression = s1
15760                                 if not expression in sets:
15761                                         display_missing_pkg_set(root_config, expression)
15762                                         return (None, 1)
15763                                 expr_sets.insert(0, expression)
15764                                 result = set(setconfig.getSetAtoms(expression))
15765                                 for i in range(0, len(expr_ops)):
15766                                         s2 = setconfig.getSetAtoms(expr_sets[i+1])
15767                                         if expr_ops[i] == IS_OPERATOR:
15768                                                 result.intersection_update(s2)
15769                                         elif expr_ops[i] == DIFF_OPERATOR:
15770                                                 result.difference_update(s2)
15771                                         elif expr_ops[i] == UNION_OPERATOR:
15772                                                 result.update(s2)
15773                                         else:
15774                                                 raise NotImplementedError("unknown set operator %s" % expr_ops[i])
15775                                 newargs.extend(result)
15776                         else:                   
15777                                 s = a[len(SETPREFIX):]
15778                                 if s not in sets:
15779                                         display_missing_pkg_set(root_config, s)
15780                                         return (None, 1)
15781                                 setconfig.active.append(s)
15782                                 try:
15783                                         set_atoms = setconfig.getSetAtoms(s)
15784                                 except portage.exception.PackageSetNotFound, e:
15785                                         writemsg_level(("emerge: the given set '%s' " + \
15786                                                 "contains a non-existent set named '%s'.\n") % \
15787                                                 (s, e), level=logging.ERROR, noiselevel=-1)
15788                                         return (None, 1)
15789                                 if myaction in unmerge_actions and \
15790                                                 not sets[s].supportsOperation("unmerge"):
15791                                         sys.stderr.write("emerge: the given set '%s' does " % s + \
15792                                                 "not support unmerge operations\n")
15793                                         retval = 1
15794                                 elif not set_atoms:
15795                                         print "emerge: '%s' is an empty set" % s
15796                                 elif myaction not in do_not_expand:
15797                                         newargs.extend(set_atoms)
15798                                 else:
15799                                         newargs.append(SETPREFIX+s)
15800                                 for e in sets[s].errors:
15801                                         print e
15802                 else:
15803                         newargs.append(a)
15804         return (newargs, retval)
15805
15806 def repo_name_check(trees):
15807         missing_repo_names = set()
15808         for root, root_trees in trees.iteritems():
15809                 if "porttree" in root_trees:
15810                         portdb = root_trees["porttree"].dbapi
15811                         missing_repo_names.update(portdb.porttrees)
15812                         repos = portdb.getRepositories()
15813                         for r in repos:
15814                                 missing_repo_names.discard(portdb.getRepositoryPath(r))
15815                         if portdb.porttree_root in missing_repo_names and \
15816                                 not os.path.exists(os.path.join(
15817                                 portdb.porttree_root, "profiles")):
15818                                 # This is normal if $PORTDIR happens to be empty,
15819                                 # so don't warn about it.
15820                                 missing_repo_names.remove(portdb.porttree_root)
15821
15822         if missing_repo_names:
15823                 msg = []
15824                 msg.append("WARNING: One or more repositories " + \
15825                         "have missing repo_name entries:")
15826                 msg.append("")
15827                 for p in missing_repo_names:
15828                         msg.append("\t%s/profiles/repo_name" % (p,))
15829                 msg.append("")
15830                 msg.extend(textwrap.wrap("NOTE: Each repo_name entry " + \
15831                         "should be a plain text file containing a unique " + \
15832                         "name for the repository on the first line.", 70))
15833                 writemsg_level("".join("%s\n" % l for l in msg),
15834                         level=logging.WARNING, noiselevel=-1)
15835
15836         return bool(missing_repo_names)
15837
15838 def repo_name_duplicate_check(trees):
15839         ignored_repos = {}
15840         for root, root_trees in trees.iteritems():
15841                 if 'porttree' in root_trees:
15842                         portdb = root_trees['porttree'].dbapi
15843                         if portdb.mysettings.get('PORTAGE_REPO_DUPLICATE_WARN') != '0':
15844                                 for repo_name, paths in portdb._ignored_repos:
15845                                         k = (root, repo_name, portdb.getRepositoryPath(repo_name))
15846                                         ignored_repos.setdefault(k, []).extend(paths)
15847
15848         if ignored_repos:
15849                 msg = []
15850                 msg.append('WARNING: One or more repositories ' + \
15851                         'have been ignored due to duplicate')
15852                 msg.append('  profiles/repo_name entries:')
15853                 msg.append('')
15854                 for k in sorted(ignored_repos):
15855                         msg.append('  %s overrides' % (k,))
15856                         for path in ignored_repos[k]:
15857                                 msg.append('    %s' % (path,))
15858                         msg.append('')
15859                 msg.extend('  ' + x for x in textwrap.wrap(
15860                         "All profiles/repo_name entries must be unique in order " + \
15861                         "to avoid having duplicates ignored. " + \
15862                         "Set PORTAGE_REPO_DUPLICATE_WARN=\"0\" in " + \
15863                         "/etc/make.conf if you would like to disable this warning."))
15864                 writemsg_level(''.join('%s\n' % l for l in msg),
15865                         level=logging.WARNING, noiselevel=-1)
15866
15867         return bool(ignored_repos)
15868
15869 def config_protect_check(trees):
15870         for root, root_trees in trees.iteritems():
15871                 if not root_trees["root_config"].settings.get("CONFIG_PROTECT"):
15872                         msg = "!!! CONFIG_PROTECT is empty"
15873                         if root != "/":
15874                                 msg += " for '%s'" % root
15875                         writemsg_level(msg, level=logging.WARN, noiselevel=-1)
15876
15877 def ambiguous_package_name(arg, atoms, root_config, spinner, myopts):
15878
15879         if "--quiet" in myopts:
15880                 print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
15881                 print "!!! one of the following fully-qualified ebuild names instead:\n"
15882                 for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
15883                         print "    " + colorize("INFORM", cp)
15884                 return
15885
15886         s = search(root_config, spinner, "--searchdesc" in myopts,
15887                 "--quiet" not in myopts, "--usepkg" in myopts,
15888                 "--usepkgonly" in myopts)
15889         null_cp = portage.dep_getkey(insert_category_into_atom(
15890                 arg, "null"))
15891         cat, atom_pn = portage.catsplit(null_cp)
15892         s.searchkey = atom_pn
15893         for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
15894                 s.addCP(cp)
15895         s.output()
15896         print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
15897         print "!!! one of the above fully-qualified ebuild names instead.\n"
15898
15899 def profile_check(trees, myaction, myopts):
15900         if myaction in ("info", "sync"):
15901                 return os.EX_OK
15902         elif "--version" in myopts or "--help" in myopts:
15903                 return os.EX_OK
15904         for root, root_trees in trees.iteritems():
15905                 if root_trees["root_config"].settings.profiles:
15906                         continue
15907                 # generate some profile related warning messages
15908                 validate_ebuild_environment(trees)
15909                 msg = "If you have just changed your profile configuration, you " + \
15910                         "should revert back to the previous configuration. Due to " + \
15911                         "your current profile being invalid, allowed actions are " + \
15912                         "limited to --help, --info, --sync, and --version."
15913                 writemsg_level("".join("!!! %s\n" % l for l in textwrap.wrap(msg, 70)),
15914                         level=logging.ERROR, noiselevel=-1)
15915                 return 1
15916         return os.EX_OK
15917
15918 def emerge_main():
15919         global portage  # NFC why this is necessary now - genone
15920         portage._disable_legacy_globals()
15921         # Disable color until we're sure that it should be enabled (after
15922         # EMERGE_DEFAULT_OPTS has been parsed).
15923         portage.output.havecolor = 0
15924         # This first pass is just for options that need to be known as early as
15925         # possible, such as --config-root.  They will be parsed again later,
15926         # together with EMERGE_DEFAULT_OPTS (which may vary depending on the
15927         # the value of --config-root).
15928         myaction, myopts, myfiles = parse_opts(sys.argv[1:], silent=True)
15929         if "--debug" in myopts:
15930                 os.environ["PORTAGE_DEBUG"] = "1"
15931         if "--config-root" in myopts:
15932                 os.environ["PORTAGE_CONFIGROOT"] = myopts["--config-root"]
15933         if "--root" in myopts:
15934                 os.environ["ROOT"] = myopts["--root"]
15935
15936         # Portage needs to ensure a sane umask for the files it creates.
15937         os.umask(022)
15938         settings, trees, mtimedb = load_emerge_config()
15939         portdb = trees[settings["ROOT"]]["porttree"].dbapi
15940         rval = profile_check(trees, myaction, myopts)
15941         if rval != os.EX_OK:
15942                 return rval
15943
15944         if portage._global_updates(trees, mtimedb["updates"]):
15945                 mtimedb.commit()
15946                 # Reload the whole config from scratch.
15947                 settings, trees, mtimedb = load_emerge_config(trees=trees)
15948                 portdb = trees[settings["ROOT"]]["porttree"].dbapi
15949
15950         xterm_titles = "notitles" not in settings.features
15951
15952         tmpcmdline = []
15953         if "--ignore-default-opts" not in myopts:
15954                 tmpcmdline.extend(settings["EMERGE_DEFAULT_OPTS"].split())
15955         tmpcmdline.extend(sys.argv[1:])
15956         myaction, myopts, myfiles = parse_opts(tmpcmdline)
15957
15958         if "--digest" in myopts:
15959                 os.environ["FEATURES"] = os.environ.get("FEATURES","") + " digest"
15960                 # Reload the whole config from scratch so that the portdbapi internal
15961                 # config is updated with new FEATURES.
15962                 settings, trees, mtimedb = load_emerge_config(trees=trees)
15963                 portdb = trees[settings["ROOT"]]["porttree"].dbapi
15964
15965         for myroot in trees:
15966                 mysettings =  trees[myroot]["vartree"].settings
15967                 mysettings.unlock()
15968                 adjust_config(myopts, mysettings)
15969                 if '--pretend' not in myopts and myaction in \
15970                         (None, 'clean', 'depclean', 'prune', 'unmerge'):
15971                         mysettings["PORTAGE_COUNTER_HASH"] = \
15972                                 trees[myroot]["vartree"].dbapi._counter_hash()
15973                         mysettings.backup_changes("PORTAGE_COUNTER_HASH")
15974                 mysettings.lock()
15975                 del myroot, mysettings
15976
15977         apply_priorities(settings)
15978
15979         spinner = stdout_spinner()
15980         if "candy" in settings.features:
15981                 spinner.update = spinner.update_scroll
15982
15983         if "--quiet" not in myopts:
15984                 portage.deprecated_profile_check(settings=settings)
15985                 repo_name_check(trees)
15986                 repo_name_duplicate_check(trees)
15987                 config_protect_check(trees)
15988
15989         for mytrees in trees.itervalues():
15990                 mydb = mytrees["porttree"].dbapi
15991                 # Freeze the portdbapi for performance (memoize all xmatch results).
15992                 mydb.freeze()
15993         del mytrees, mydb
15994
15995         if "moo" in myfiles:
15996                 print """
15997
15998   Larry loves Gentoo (""" + platform.system() + """)
15999
16000  _______________________
16001 < Have you mooed today? >
16002  -----------------------
16003         \   ^__^
16004          \  (oo)\_______
16005             (__)\       )\/\ 
16006                 ||----w |
16007                 ||     ||
16008
16009 """
16010
16011         for x in myfiles:
16012                 ext = os.path.splitext(x)[1]
16013                 if (ext == ".ebuild" or ext == ".tbz2") and os.path.exists(os.path.abspath(x)):
16014                         print colorize("BAD", "\n*** emerging by path is broken and may not always work!!!\n")
16015                         break
16016
16017         root_config = trees[settings["ROOT"]]["root_config"]
16018         if myaction == "list-sets":
16019                 sys.stdout.write("".join("%s\n" % s for s in sorted(root_config.sets)))
16020                 sys.stdout.flush()
16021                 return os.EX_OK
16022
16023         # only expand sets for actions taking package arguments
16024         oldargs = myfiles[:]
16025         if myaction in ("clean", "config", "depclean", "info", "prune", "unmerge", None):
16026                 myfiles, retval = expand_set_arguments(myfiles, myaction, root_config)
16027                 if retval != os.EX_OK:
16028                         return retval
16029
16030                 # Need to handle empty sets specially, otherwise emerge will react 
16031                 # with the help message for empty argument lists
16032                 if oldargs and not myfiles:
16033                         print "emerge: no targets left after set expansion"
16034                         return 0
16035
16036         if ("--tree" in myopts) and ("--columns" in myopts):
16037                 print "emerge: can't specify both of \"--tree\" and \"--columns\"."
16038                 return 1
16039
16040         if ("--quiet" in myopts):
16041                 spinner.update = spinner.update_quiet
16042                 portage.util.noiselimit = -1
16043
16044         # Always create packages if FEATURES=buildpkg
16045         # Imply --buildpkg if --buildpkgonly
16046         if ("buildpkg" in settings.features) or ("--buildpkgonly" in myopts):
16047                 if "--buildpkg" not in myopts:
16048                         myopts["--buildpkg"] = True
16049
16050         # Always try and fetch binary packages if FEATURES=getbinpkg
16051         if ("getbinpkg" in settings.features):
16052                 myopts["--getbinpkg"] = True
16053
16054         if "--buildpkgonly" in myopts:
16055                 # --buildpkgonly will not merge anything, so
16056                 # it cancels all binary package options.
16057                 for opt in ("--getbinpkg", "--getbinpkgonly",
16058                         "--usepkg", "--usepkgonly"):
16059                         myopts.pop(opt, None)
16060
16061         if "--fetch-all-uri" in myopts:
16062                 myopts["--fetchonly"] = True
16063
16064         if "--skipfirst" in myopts and "--resume" not in myopts:
16065                 myopts["--resume"] = True
16066
16067         if ("--getbinpkgonly" in myopts) and not ("--usepkgonly" in myopts):
16068                 myopts["--usepkgonly"] = True
16069
16070         if ("--getbinpkgonly" in myopts) and not ("--getbinpkg" in myopts):
16071                 myopts["--getbinpkg"] = True
16072
16073         if ("--getbinpkg" in myopts) and not ("--usepkg" in myopts):
16074                 myopts["--usepkg"] = True
16075
16076         # Also allow -K to apply --usepkg/-k
16077         if ("--usepkgonly" in myopts) and not ("--usepkg" in myopts):
16078                 myopts["--usepkg"] = True
16079
16080         # Allow -p to remove --ask
16081         if ("--pretend" in myopts) and ("--ask" in myopts):
16082                 print ">>> --pretend disables --ask... removing --ask from options."
16083                 del myopts["--ask"]
16084
16085         # forbid --ask when not in a terminal
16086         # note: this breaks `emerge --ask | tee logfile`, but that doesn't work anyway.
16087         if ("--ask" in myopts) and (not sys.stdin.isatty()):
16088                 portage.writemsg("!!! \"--ask\" should only be used in a terminal. Exiting.\n",
16089                         noiselevel=-1)
16090                 return 1
16091
16092         if settings.get("PORTAGE_DEBUG", "") == "1":
16093                 spinner.update = spinner.update_quiet
16094                 portage.debug=1
16095                 if "python-trace" in settings.features:
16096                         import portage.debug
16097                         portage.debug.set_trace(True)
16098
16099         if not ("--quiet" in myopts):
16100                 if not sys.stdout.isatty() or ("--nospinner" in myopts):
16101                         spinner.update = spinner.update_basic
16102
16103         if myaction == 'version':
16104                 print getportageversion(settings["PORTDIR"], settings["ROOT"],
16105                         settings.profile_path, settings["CHOST"],
16106                         trees[settings["ROOT"]]["vartree"].dbapi)
16107                 return 0
16108         elif "--help" in myopts:
16109                 _emerge.help.help(myaction, myopts, portage.output.havecolor)
16110                 return 0
16111
16112         if "--debug" in myopts:
16113                 print "myaction", myaction
16114                 print "myopts", myopts
16115
16116         if not myaction and not myfiles and "--resume" not in myopts:
16117                 _emerge.help.help(myaction, myopts, portage.output.havecolor)
16118                 return 1
16119
16120         pretend = "--pretend" in myopts
16121         fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
16122         buildpkgonly = "--buildpkgonly" in myopts
16123
16124         # check if root user is the current user for the actions where emerge needs this
16125         if portage.secpass < 2:
16126                 # We've already allowed "--version" and "--help" above.
16127                 if "--pretend" not in myopts and myaction not in ("search","info"):
16128                         need_superuser = myaction in ('deselect',) or not \
16129                                 (fetchonly or \
16130                                 (buildpkgonly and secpass >= 1) or \
16131                                 myaction in ("metadata", "regen") or \
16132                                 (myaction == "sync" and os.access(settings["PORTDIR"], os.W_OK)))
16133                         if portage.secpass < 1 or \
16134                                 need_superuser:
16135                                 if need_superuser:
16136                                         access_desc = "superuser"
16137                                 else:
16138                                         access_desc = "portage group"
16139                                 # Always show portage_group_warning() when only portage group
16140                                 # access is required but the user is not in the portage group.
16141                                 from portage.data import portage_group_warning
16142                                 if "--ask" in myopts:
16143                                         myopts["--pretend"] = True
16144                                         del myopts["--ask"]
16145                                         print ("%s access is required... " + \
16146                                                 "adding --pretend to options.\n") % access_desc
16147                                         if portage.secpass < 1 and not need_superuser:
16148                                                 portage_group_warning()
16149                                 else:
16150                                         sys.stderr.write(("emerge: %s access is " + \
16151                                                 "required.\n\n") % access_desc)
16152                                         if portage.secpass < 1 and not need_superuser:
16153                                                 portage_group_warning()
16154                                         return 1
16155
16156         disable_emergelog = False
16157         for x in ("--pretend", "--fetchonly", "--fetch-all-uri"):
16158                 if x in myopts:
16159                         disable_emergelog = True
16160                         break
16161         if myaction in ("search", "info"):
16162                 disable_emergelog = True
16163         if disable_emergelog:
16164                 """ Disable emergelog for everything except build or unmerge
16165                 operations.  This helps minimize parallel emerge.log entries that can
16166                 confuse log parsers.  We especially want it disabled during
16167                 parallel-fetch, which uses --resume --fetchonly."""
16168                 global emergelog
16169                 def emergelog(*pargs, **kargs):
16170                         pass
16171
16172         if not "--pretend" in myopts:
16173                 emergelog(xterm_titles, "Started emerge on: "+\
16174                         time.strftime("%b %d, %Y %H:%M:%S", time.localtime()))
16175                 myelogstr=""
16176                 if myopts:
16177                         myelogstr=" ".join(myopts)
16178                 if myaction:
16179                         myelogstr+=" "+myaction
16180                 if myfiles:
16181                         myelogstr += " " + " ".join(oldargs)
16182                 emergelog(xterm_titles, " *** emerge " + myelogstr)
16183         del oldargs
16184
16185         def emergeexitsig(signum, frame):
16186                 signal.signal(signal.SIGINT, signal.SIG_IGN)
16187                 signal.signal(signal.SIGTERM, signal.SIG_IGN)
16188                 portage.util.writemsg("\n\nExiting on signal %(signal)s\n" % {"signal":signum})
16189                 sys.exit(100+signum)
16190         signal.signal(signal.SIGINT, emergeexitsig)
16191         signal.signal(signal.SIGTERM, emergeexitsig)
16192
16193         def emergeexit():
16194                 """This gets out final log message in before we quit."""
16195                 if "--pretend" not in myopts:
16196                         emergelog(xterm_titles, " *** terminating.")
16197                 if "notitles" not in settings.features:
16198                         xtermTitleReset()
16199         portage.atexit_register(emergeexit)
16200
16201         if myaction in ("config", "metadata", "regen", "sync"):
16202                 if "--pretend" in myopts:
16203                         sys.stderr.write(("emerge: The '%s' action does " + \
16204                                 "not support '--pretend'.\n") % myaction)
16205                         return 1
16206
16207         if "sync" == myaction:
16208                 return action_sync(settings, trees, mtimedb, myopts, myaction)
16209         elif "metadata" == myaction:
16210                 action_metadata(settings, portdb, myopts)
16211         elif myaction=="regen":
16212                 validate_ebuild_environment(trees)
16213                 return action_regen(settings, portdb, myopts.get("--jobs"),
16214                         myopts.get("--load-average"))
16215         # HELP action
16216         elif "config"==myaction:
16217                 validate_ebuild_environment(trees)
16218                 action_config(settings, trees, myopts, myfiles)
16219
16220         # SEARCH action
16221         elif "search"==myaction:
16222                 validate_ebuild_environment(trees)
16223                 action_search(trees[settings["ROOT"]]["root_config"],
16224                         myopts, myfiles, spinner)
16225
16226         elif myaction in ('clean', 'depclean', 'deselect', 'prune', 'unmerge'):
16227                 validate_ebuild_environment(trees)
16228                 rval = action_uninstall(settings, trees, mtimedb["ldpath"],
16229                         myopts, myaction, myfiles, spinner)
16230                 if not (myaction == 'deselect' or buildpkgonly or fetchonly or pretend):
16231                         post_emerge(root_config, myopts, mtimedb, rval)
16232                 return rval
16233
16234         elif myaction == 'info':
16235
16236                 # Ensure atoms are valid before calling unmerge().
16237                 vardb = trees[settings["ROOT"]]["vartree"].dbapi
16238                 valid_atoms = []
16239                 for x in myfiles:
16240                         if is_valid_package_atom(x):
16241                                 try:
16242                                         valid_atoms.append(
16243                                                 portage.dep_expand(x, mydb=vardb, settings=settings))
16244                                 except portage.exception.AmbiguousPackageName, e:
16245                                         msg = "The short ebuild name \"" + x + \
16246                                                 "\" is ambiguous.  Please specify " + \
16247                                                 "one of the following " + \
16248                                                 "fully-qualified ebuild names instead:"
16249                                         for line in textwrap.wrap(msg, 70):
16250                                                 writemsg_level("!!! %s\n" % (line,),
16251                                                         level=logging.ERROR, noiselevel=-1)
16252                                         for i in e[0]:
16253                                                 writemsg_level("    %s\n" % colorize("INFORM", i),
16254                                                         level=logging.ERROR, noiselevel=-1)
16255                                         writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
16256                                         return 1
16257                                 continue
16258                         msg = []
16259                         msg.append("'%s' is not a valid package atom." % (x,))
16260                         msg.append("Please check ebuild(5) for full details.")
16261                         writemsg_level("".join("!!! %s\n" % line for line in msg),
16262                                 level=logging.ERROR, noiselevel=-1)
16263                         return 1
16264
16265                 return action_info(settings, trees, myopts, valid_atoms)
16266
16267         # "update", "system", or just process files:
16268         else:
16269                 validate_ebuild_environment(trees)
16270
16271                 for x in myfiles:
16272                         if x.startswith(SETPREFIX) or \
16273                                 is_valid_package_atom(x):
16274                                 continue
16275                         if x[:1] == os.sep:
16276                                 continue
16277                         try:
16278                                 os.lstat(x)
16279                                 continue
16280                         except OSError:
16281                                 pass
16282                         msg = []
16283                         msg.append("'%s' is not a valid package atom." % (x,))
16284                         msg.append("Please check ebuild(5) for full details.")
16285                         writemsg_level("".join("!!! %s\n" % line for line in msg),
16286                                 level=logging.ERROR, noiselevel=-1)
16287                         return 1
16288
16289                 if "--pretend" not in myopts:
16290                         display_news_notification(root_config, myopts)
16291                 retval = action_build(settings, trees, mtimedb,
16292                         myopts, myaction, myfiles, spinner)
16293                 root_config = trees[settings["ROOT"]]["root_config"]
16294                 post_emerge(root_config, myopts, mtimedb, retval)
16295
16296                 return retval