Add back the --depclean warning about "link level dependencies" since the
[portage.git] / pym / _emerge / __init__.py
1 #!/usr/bin/python -O
2 # Copyright 1999-2006 Gentoo Foundation
3 # Distributed under the terms of the GNU General Public License v2
4 # $Id: emerge 5976 2007-02-17 09:14:53Z genone $
5
6 import sys
7 # This block ensures that ^C interrupts are handled quietly.
8 try:
9         import signal
10
11         def exithandler(signum,frame):
12                 signal.signal(signal.SIGINT, signal.SIG_IGN)
13                 signal.signal(signal.SIGTERM, signal.SIG_IGN)
14                 sys.exit(1)
15         
16         signal.signal(signal.SIGINT, exithandler)
17         signal.signal(signal.SIGTERM, exithandler)
18         signal.signal(signal.SIGPIPE, signal.SIG_DFL)
19
20 except KeyboardInterrupt:
21         sys.exit(1)
22
23 import array
24 from collections import deque
25 import fcntl
26 import formatter
27 import logging
28 import select
29 import shlex
30 import shutil
31 import textwrap
32 import urlparse
33 import weakref
34 import gc
35 import os, stat
36 import platform
37
38 try:
39         import portage
40 except ImportError:
41         from os import path as osp
42         sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
43         import portage
44
45 from portage import digraph
46 from portage.const import NEWS_LIB_PATH
47
48 import _emerge.help
49 import portage.xpak, commands, errno, re, socket, time, types
50 from portage.output import blue, bold, colorize, darkblue, darkgreen, darkred, green, \
51         nc_len, red, teal, turquoise, xtermTitle, \
52         xtermTitleReset, yellow
53 from portage.output import create_color_func
54 good = create_color_func("GOOD")
55 bad = create_color_func("BAD")
56 # white looks bad on terminals with white background
57 from portage.output import bold as white
58
59 import portage.elog
60 import portage.dep
61 portage.dep._dep_check_strict = True
62 import portage.util
63 import portage.locks
64 import portage.exception
65 from portage.data import secpass
66 from portage.elog.messages import eerror
67 from portage.util import normalize_path as normpath
68 from portage.util import writemsg, writemsg_level
69 from portage._sets import load_default_config, SETPREFIX
70 from portage._sets.base import InternalPackageSet
71
72 from itertools import chain, izip
73 from UserDict import DictMixin
74
75 try:
76         import cPickle as pickle
77 except ImportError:
78         import pickle
79
80 try:
81         import cStringIO as StringIO
82 except ImportError:
83         import StringIO
84
85 class stdout_spinner(object):
86         scroll_msgs = [
87                 "Gentoo Rocks ("+platform.system()+")",
88                 "Thank you for using Gentoo. :)",
89                 "Are you actually trying to read this?",
90                 "How many times have you stared at this?",
91                 "We are generating the cache right now",
92                 "You are paying too much attention.",
93                 "A theory is better than its explanation.",
94                 "Phasers locked on target, Captain.",
95                 "Thrashing is just virtual crashing.",
96                 "To be is to program.",
97                 "Real Users hate Real Programmers.",
98                 "When all else fails, read the instructions.",
99                 "Functionality breeds Contempt.",
100                 "The future lies ahead.",
101                 "3.1415926535897932384626433832795028841971694",
102                 "Sometimes insanity is the only alternative.",
103                 "Inaccuracy saves a world of explanation.",
104         ]
105
106         twirl_sequence = "/-\\|/-\\|/-\\|/-\\|\\-/|\\-/|\\-/|\\-/|"
107
108         def __init__(self):
109                 self.spinpos = 0
110                 self.update = self.update_twirl
111                 self.scroll_sequence = self.scroll_msgs[
112                         int(time.time() * 100) % len(self.scroll_msgs)]
113                 self.last_update = 0
114                 self.min_display_latency = 0.05
115
116         def _return_early(self):
117                 """
118                 Flushing ouput to the tty too frequently wastes cpu time. Therefore,
119                 each update* method should return without doing any output when this
120                 method returns True.
121                 """
122                 cur_time = time.time()
123                 if cur_time - self.last_update < self.min_display_latency:
124                         return True
125                 self.last_update = cur_time
126                 return False
127
128         def update_basic(self):
129                 self.spinpos = (self.spinpos + 1) % 500
130                 if self._return_early():
131                         return
132                 if (self.spinpos % 100) == 0:
133                         if self.spinpos == 0:
134                                 sys.stdout.write(". ")
135                         else:
136                                 sys.stdout.write(".")
137                 sys.stdout.flush()
138
139         def update_scroll(self):
140                 if self._return_early():
141                         return
142                 if(self.spinpos >= len(self.scroll_sequence)):
143                         sys.stdout.write(darkgreen(" \b\b\b" + self.scroll_sequence[
144                                 len(self.scroll_sequence) - 1 - (self.spinpos % len(self.scroll_sequence))]))
145                 else:
146                         sys.stdout.write(green("\b " + self.scroll_sequence[self.spinpos]))
147                 sys.stdout.flush()
148                 self.spinpos = (self.spinpos + 1) % (2 * len(self.scroll_sequence))
149
150         def update_twirl(self):
151                 self.spinpos = (self.spinpos + 1) % len(self.twirl_sequence)
152                 if self._return_early():
153                         return
154                 sys.stdout.write("\b\b " + self.twirl_sequence[self.spinpos])
155                 sys.stdout.flush()
156
157         def update_quiet(self):
158                 return
159
160 def userquery(prompt, responses=None, colours=None):
161         """Displays a prompt and a set of responses, then waits for a response
162         which is checked against the responses and the first to match is
163         returned.  An empty response will match the first value in responses.  The
164         input buffer is *not* cleared prior to the prompt!
165
166         prompt: a String.
167         responses: a List of Strings.
168         colours: a List of Functions taking and returning a String, used to
169         process the responses for display. Typically these will be functions
170         like red() but could be e.g. lambda x: "DisplayString".
171         If responses is omitted, defaults to ["Yes", "No"], [green, red].
172         If only colours is omitted, defaults to [bold, ...].
173
174         Returns a member of the List responses. (If called without optional
175         arguments, returns "Yes" or "No".)
176         KeyboardInterrupt is converted to SystemExit to avoid tracebacks being
177         printed."""
178         if responses is None:
179                 responses = ["Yes", "No"]
180                 colours = [
181                         create_color_func("PROMPT_CHOICE_DEFAULT"),
182                         create_color_func("PROMPT_CHOICE_OTHER")
183                 ]
184         elif colours is None:
185                 colours=[bold]
186         colours=(colours*len(responses))[:len(responses)]
187         print bold(prompt),
188         try:
189                 while True:
190                         response=raw_input("["+"/".join([colours[i](responses[i]) for i in range(len(responses))])+"] ")
191                         for key in responses:
192                                 # An empty response will match the first value in responses.
193                                 if response.upper()==key[:len(response)].upper():
194                                         return key
195                         print "Sorry, response '%s' not understood." % response,
196         except (EOFError, KeyboardInterrupt):
197                 print "Interrupted."
198                 sys.exit(1)
199
200 actions = frozenset([
201 "clean", "config", "depclean",
202 "info", "list-sets", "metadata",
203 "prune", "regen",  "search",
204 "sync",  "unmerge",
205 ])
206 options=[
207 "--ask",          "--alphabetical",
208 "--buildpkg",     "--buildpkgonly",
209 "--changelog",    "--columns",
210 "--complete-graph",
211 "--debug",        "--deep",
212 "--digest",
213 "--emptytree",
214 "--fetchonly",    "--fetch-all-uri",
215 "--getbinpkg",    "--getbinpkgonly",
216 "--help",         "--ignore-default-opts",
217 "--keep-going",
218 "--noconfmem",
219 "--newuse",       "--nocolor",
220 "--nodeps",       "--noreplace",
221 "--nospinner",    "--oneshot",
222 "--onlydeps",     "--pretend",
223 "--quiet",        "--resume",
224 "--searchdesc",   "--selective",
225 "--skipfirst",
226 "--tree",
227 "--update",
228 "--usepkg",       "--usepkgonly",
229 "--verbose",      "--version"
230 ]
231
232 shortmapping={
233 "1":"--oneshot",
234 "a":"--ask",
235 "b":"--buildpkg",  "B":"--buildpkgonly",
236 "c":"--clean",     "C":"--unmerge",
237 "d":"--debug",     "D":"--deep",
238 "e":"--emptytree",
239 "f":"--fetchonly", "F":"--fetch-all-uri",
240 "g":"--getbinpkg", "G":"--getbinpkgonly",
241 "h":"--help",
242 "k":"--usepkg",    "K":"--usepkgonly",
243 "l":"--changelog",
244 "n":"--noreplace", "N":"--newuse",
245 "o":"--onlydeps",  "O":"--nodeps",
246 "p":"--pretend",   "P":"--prune",
247 "q":"--quiet",
248 "s":"--search",    "S":"--searchdesc",
249 "t":"--tree",
250 "u":"--update",
251 "v":"--verbose",   "V":"--version"
252 }
253
254 def emergelog(xterm_titles, mystr, short_msg=None):
255         if xterm_titles and short_msg:
256                 if "HOSTNAME" in os.environ:
257                         short_msg = os.environ["HOSTNAME"]+": "+short_msg
258                 xtermTitle(short_msg)
259         try:
260                 file_path = "/var/log/emerge.log"
261                 mylogfile = open(file_path, "a")
262                 portage.util.apply_secpass_permissions(file_path,
263                         uid=portage.portage_uid, gid=portage.portage_gid,
264                         mode=0660)
265                 mylock = None
266                 try:
267                         mylock = portage.locks.lockfile(mylogfile)
268                         # seek because we may have gotten held up by the lock.
269                         # if so, we may not be positioned at the end of the file.
270                         mylogfile.seek(0, 2)
271                         mylogfile.write(str(time.time())[:10]+": "+mystr+"\n")
272                         mylogfile.flush()
273                 finally:
274                         if mylock:
275                                 portage.locks.unlockfile(mylock)
276                         mylogfile.close()
277         except (IOError,OSError,portage.exception.PortageException), e:
278                 if secpass >= 1:
279                         print >> sys.stderr, "emergelog():",e
280
281 def countdown(secs=5, doing="Starting"):
282         if secs:
283                 print ">>> Waiting",secs,"seconds before starting..."
284                 print ">>> (Control-C to abort)...\n"+doing+" in: ",
285                 ticks=range(secs)
286                 ticks.reverse()
287                 for sec in ticks:
288                         sys.stdout.write(colorize("UNMERGE_WARN", str(sec+1)+" "))
289                         sys.stdout.flush()
290                         time.sleep(1)
291                 print
292
293 # formats a size given in bytes nicely
294 def format_size(mysize):
295         if type(mysize) not in [types.IntType,types.LongType]:
296                 return str(mysize)
297         if 0 != mysize % 1024:
298                 # Always round up to the next kB so that it doesn't show 0 kB when
299                 # some small file still needs to be fetched.
300                 mysize += 1024 - mysize % 1024
301         mystr=str(mysize/1024)
302         mycount=len(mystr)
303         while (mycount > 3):
304                 mycount-=3
305                 mystr=mystr[:mycount]+","+mystr[mycount:]
306         return mystr+" kB"
307
308
309 def getgccversion(chost):
310         """
311         rtype: C{str}
312         return:  the current in-use gcc version
313         """
314
315         gcc_ver_command = 'gcc -dumpversion'
316         gcc_ver_prefix = 'gcc-'
317
318         gcc_not_found_error = red(
319         "!!! No gcc found. You probably need to 'source /etc/profile'\n" +
320         "!!! to update the environment of this terminal and possibly\n" +
321         "!!! other terminals also.\n"
322         )
323
324         mystatus, myoutput = commands.getstatusoutput("gcc-config -c")
325         if mystatus == os.EX_OK and myoutput.startswith(chost + "-"):
326                 return myoutput.replace(chost + "-", gcc_ver_prefix, 1)
327
328         mystatus, myoutput = commands.getstatusoutput(
329                 chost + "-" + gcc_ver_command)
330         if mystatus == os.EX_OK:
331                 return gcc_ver_prefix + myoutput
332
333         mystatus, myoutput = commands.getstatusoutput(gcc_ver_command)
334         if mystatus == os.EX_OK:
335                 return gcc_ver_prefix + myoutput
336
337         portage.writemsg(gcc_not_found_error, noiselevel=-1)
338         return "[unavailable]"
339
340 def getportageversion(portdir, target_root, profile, chost, vardb):
341         profilever = "unavailable"
342         if profile:
343                 realpath = os.path.realpath(profile)
344                 basepath   = os.path.realpath(os.path.join(portdir, "profiles"))
345                 if realpath.startswith(basepath):
346                         profilever = realpath[1 + len(basepath):]
347                 else:
348                         try:
349                                 profilever = "!" + os.readlink(profile)
350                         except (OSError):
351                                 pass
352                 del realpath, basepath
353
354         libcver=[]
355         libclist  = vardb.match("virtual/libc")
356         libclist += vardb.match("virtual/glibc")
357         libclist  = portage.util.unique_array(libclist)
358         for x in libclist:
359                 xs=portage.catpkgsplit(x)
360                 if libcver:
361                         libcver+=","+"-".join(xs[1:])
362                 else:
363                         libcver="-".join(xs[1:])
364         if libcver==[]:
365                 libcver="unavailable"
366
367         gccver = getgccversion(chost)
368         unameout=platform.release()+" "+platform.machine()
369
370         return "Portage " + portage.VERSION +" ("+profilever+", "+gccver+", "+libcver+", "+unameout+")"
371
372 def create_depgraph_params(myopts, myaction):
373         #configure emerge engine parameters
374         #
375         # self:      include _this_ package regardless of if it is merged.
376         # selective: exclude the package if it is merged
377         # recurse:   go into the dependencies
378         # deep:      go into the dependencies of already merged packages
379         # empty:     pretend nothing is merged
380         # complete:  completely account for all known dependencies
381         # remove:    build graph for use in removing packages
382         myparams = set(["recurse"])
383
384         if myaction == "remove":
385                 myparams.add("remove")
386                 myparams.add("complete")
387                 return myparams
388
389         if "--update" in myopts or \
390                 "--newuse" in myopts or \
391                 "--reinstall" in myopts or \
392                 "--noreplace" in myopts:
393                 myparams.add("selective")
394         if "--emptytree" in myopts:
395                 myparams.add("empty")
396                 myparams.discard("selective")
397         if "--nodeps" in myopts:
398                 myparams.discard("recurse")
399         if "--deep" in myopts:
400                 myparams.add("deep")
401         if "--complete-graph" in myopts:
402                 myparams.add("complete")
403         return myparams
404
405 # search functionality
406 class search(object):
407
408         #
409         # class constants
410         #
411         VERSION_SHORT=1
412         VERSION_RELEASE=2
413
414         #
415         # public interface
416         #
417         def __init__(self, root_config, spinner, searchdesc,
418                 verbose, usepkg, usepkgonly):
419                 """Searches the available and installed packages for the supplied search key.
420                 The list of available and installed packages is created at object instantiation.
421                 This makes successive searches faster."""
422                 self.settings = root_config.settings
423                 self.vartree = root_config.trees["vartree"]
424                 self.spinner = spinner
425                 self.verbose = verbose
426                 self.searchdesc = searchdesc
427                 self.root_config = root_config
428                 self.setconfig = root_config.setconfig
429                 self.matches = {"pkg" : []}
430                 self.mlen = 0
431
432                 def fake_portdb():
433                         pass
434                 self.portdb = fake_portdb
435                 for attrib in ("aux_get", "cp_all",
436                         "xmatch", "findname", "getFetchMap"):
437                         setattr(fake_portdb, attrib, getattr(self, "_"+attrib))
438
439                 self._dbs = []
440
441                 portdb = root_config.trees["porttree"].dbapi
442                 bindb = root_config.trees["bintree"].dbapi
443                 vardb = root_config.trees["vartree"].dbapi
444
445                 if not usepkgonly and portdb._have_root_eclass_dir:
446                         self._dbs.append(portdb)
447
448                 if (usepkg or usepkgonly) and bindb.cp_all():
449                         self._dbs.append(bindb)
450
451                 self._dbs.append(vardb)
452                 self._portdb = portdb
453
454         def _cp_all(self):
455                 cp_all = set()
456                 for db in self._dbs:
457                         cp_all.update(db.cp_all())
458                 return list(sorted(cp_all))
459
460         def _aux_get(self, *args, **kwargs):
461                 for db in self._dbs:
462                         try:
463                                 return db.aux_get(*args, **kwargs)
464                         except KeyError:
465                                 pass
466                 raise
467
468         def _findname(self, *args, **kwargs):
469                 for db in self._dbs:
470                         if db is not self._portdb:
471                                 # We don't want findname to return anything
472                                 # unless it's an ebuild in a portage tree.
473                                 # Otherwise, it's already built and we don't
474                                 # care about it.
475                                 continue
476                         func = getattr(db, "findname", None)
477                         if func:
478                                 value = func(*args, **kwargs)
479                                 if value:
480                                         return value
481                 return None
482
483         def _getFetchMap(self, *args, **kwargs):
484                 for db in self._dbs:
485                         func = getattr(db, "getFetchMap", None)
486                         if func:
487                                 value = func(*args, **kwargs)
488                                 if value:
489                                         return value
490                 return {}
491
492         def _visible(self, db, cpv, metadata):
493                 installed = db is self.vartree.dbapi
494                 built = installed or db is not self._portdb
495                 pkg_type = "ebuild"
496                 if installed:
497                         pkg_type = "installed"
498                 elif built:
499                         pkg_type = "binary"
500                 return visible(self.settings,
501                         Package(type_name=pkg_type, root_config=self.root_config,
502                         cpv=cpv, built=built, installed=installed, metadata=metadata))
503
504         def _xmatch(self, level, atom):
505                 """
506                 This method does not expand old-style virtuals because it
507                 is restricted to returning matches for a single ${CATEGORY}/${PN}
508                 and old-style virual matches unreliable for that when querying
509                 multiple package databases. If necessary, old-style virtuals
510                 can be performed on atoms prior to calling this method.
511                 """
512                 cp = portage.dep_getkey(atom)
513                 if level == "match-all":
514                         matches = set()
515                         for db in self._dbs:
516                                 if hasattr(db, "xmatch"):
517                                         matches.update(db.xmatch(level, atom))
518                                 else:
519                                         matches.update(db.match(atom))
520                         result = list(x for x in matches if portage.cpv_getkey(x) == cp)
521                         db._cpv_sort_ascending(result)
522                 elif level == "match-visible":
523                         matches = set()
524                         for db in self._dbs:
525                                 if hasattr(db, "xmatch"):
526                                         matches.update(db.xmatch(level, atom))
527                                 else:
528                                         db_keys = list(db._aux_cache_keys)
529                                         for cpv in db.match(atom):
530                                                 metadata = izip(db_keys,
531                                                         db.aux_get(cpv, db_keys))
532                                                 if not self._visible(db, cpv, metadata):
533                                                         continue
534                                                 matches.add(cpv)
535                         result = list(x for x in matches if portage.cpv_getkey(x) == cp)
536                         db._cpv_sort_ascending(result)
537                 elif level == "bestmatch-visible":
538                         result = None
539                         for db in self._dbs:
540                                 if hasattr(db, "xmatch"):
541                                         cpv = db.xmatch("bestmatch-visible", atom)
542                                         if not cpv or portage.cpv_getkey(cpv) != cp:
543                                                 continue
544                                         if not result or cpv == portage.best([cpv, result]):
545                                                 result = cpv
546                                 else:
547                                         db_keys = Package.metadata_keys
548                                         # break out of this loop with highest visible
549                                         # match, checked in descending order
550                                         for cpv in reversed(db.match(atom)):
551                                                 if portage.cpv_getkey(cpv) != cp:
552                                                         continue
553                                                 metadata = izip(db_keys,
554                                                         db.aux_get(cpv, db_keys))
555                                                 if not self._visible(db, cpv, metadata):
556                                                         continue
557                                                 if not result or cpv == portage.best([cpv, result]):
558                                                         result = cpv
559                                                 break
560                 else:
561                         raise NotImplementedError(level)
562                 return result
563
564         def execute(self,searchkey):
565                 """Performs the search for the supplied search key"""
566                 match_category = 0
567                 self.searchkey=searchkey
568                 self.packagematches = []
569                 if self.searchdesc:
570                         self.searchdesc=1
571                         self.matches = {"pkg":[], "desc":[]}
572                 else:
573                         self.searchdesc=0
574                         self.matches = {"pkg":[]}
575                 print "Searching...   ",
576
577                 regexsearch = False
578                 if self.searchkey.startswith('%'):
579                         regexsearch = True
580                         self.searchkey = self.searchkey[1:]
581                 if self.searchkey.startswith('@'):
582                         match_category = 1
583                         self.searchkey = self.searchkey[1:]
584                 if regexsearch:
585                         self.searchre=re.compile(self.searchkey,re.I)
586                 else:
587                         self.searchre=re.compile(re.escape(self.searchkey), re.I)
588                 for package in self.portdb.cp_all():
589                         self.spinner.update()
590
591                         if match_category:
592                                 match_string  = package[:]
593                         else:
594                                 match_string  = package.split("/")[-1]
595
596                         masked=0
597                         if self.searchre.search(match_string):
598                                 if not self.portdb.xmatch("match-visible", package):
599                                         masked=1
600                                 self.matches["pkg"].append([package,masked])
601                         elif self.searchdesc: # DESCRIPTION searching
602                                 full_package = self.portdb.xmatch("bestmatch-visible", package)
603                                 if not full_package:
604                                         #no match found; we don't want to query description
605                                         full_package = portage.best(
606                                                 self.portdb.xmatch("match-all", package))
607                                         if not full_package:
608                                                 continue
609                                         else:
610                                                 masked=1
611                                 try:
612                                         full_desc = self.portdb.aux_get(
613                                                 full_package, ["DESCRIPTION"])[0]
614                                 except KeyError:
615                                         print "emerge: search: aux_get() failed, skipping"
616                                         continue
617                                 if self.searchre.search(full_desc):
618                                         self.matches["desc"].append([full_package,masked])
619
620                 self.mlen=0
621                 for mtype in self.matches:
622                         self.matches[mtype].sort()
623                         self.mlen += len(self.matches[mtype])
624
625         def addCP(self, cp):
626                 if not self.portdb.xmatch("match-all", cp):
627                         return
628                 masked = 0
629                 if not self.portdb.xmatch("bestmatch-visible", cp):
630                         masked = 1
631                 self.matches["pkg"].append([cp, masked])
632                 self.mlen += 1
633
634         def output(self):
635                 """Outputs the results of the search."""
636                 print "\b\b  \n[ Results for search key : "+white(self.searchkey)+" ]"
637                 print "[ Applications found : "+white(str(self.mlen))+" ]"
638                 print " "
639                 vardb = self.vartree.dbapi
640                 for mtype in self.matches:
641                         for match,masked in self.matches[mtype]:
642                                 full_package = None
643                                 if mtype == "pkg":
644                                         catpack = match
645                                         full_package = self.portdb.xmatch(
646                                                 "bestmatch-visible", match)
647                                         if not full_package:
648                                                 #no match found; we don't want to query description
649                                                 masked=1
650                                                 full_package = portage.best(
651                                                         self.portdb.xmatch("match-all",match))
652                                 elif mtype == "desc":
653                                         full_package = match
654                                         match        = portage.cpv_getkey(match)
655                                 if full_package:
656                                         try:
657                                                 desc, homepage, license = self.portdb.aux_get(
658                                                         full_package, ["DESCRIPTION","HOMEPAGE","LICENSE"])
659                                         except KeyError:
660                                                 print "emerge: search: aux_get() failed, skipping"
661                                                 continue
662                                         if masked:
663                                                 print green("*")+"  "+white(match)+" "+red("[ Masked ]")
664                                         else:
665                                                 print green("*")+"  "+white(match)
666                                         myversion = self.getVersion(full_package, search.VERSION_RELEASE)
667
668                                         mysum = [0,0]
669                                         file_size_str = None
670                                         mycat = match.split("/")[0]
671                                         mypkg = match.split("/")[1]
672                                         mycpv = match + "-" + myversion
673                                         myebuild = self.portdb.findname(mycpv)
674                                         if myebuild:
675                                                 pkgdir = os.path.dirname(myebuild)
676                                                 from portage import manifest
677                                                 mf = manifest.Manifest(
678                                                         pkgdir, self.settings["DISTDIR"])
679                                                 try:
680                                                         uri_map = self.portdb.getFetchMap(mycpv)
681                                                 except portage.exception.InvalidDependString, e:
682                                                         file_size_str = "Unknown (%s)" % (e,)
683                                                         del e
684                                                 else:
685                                                         try:
686                                                                 mysum[0] = mf.getDistfilesSize(uri_map)
687                                                         except KeyError, e:
688                                                                 file_size_str = "Unknown (missing " + \
689                                                                         "digest for %s)" % (e,)
690                                                                 del e
691
692                                         available = False
693                                         for db in self._dbs:
694                                                 if db is not vardb and \
695                                                         db.cpv_exists(mycpv):
696                                                         available = True
697                                                         if not myebuild and hasattr(db, "bintree"):
698                                                                 myebuild = db.bintree.getname(mycpv)
699                                                                 try:
700                                                                         mysum[0] = os.stat(myebuild).st_size
701                                                                 except OSError:
702                                                                         myebuild = None
703                                                         break
704
705                                         if myebuild and file_size_str is None:
706                                                 mystr = str(mysum[0] / 1024)
707                                                 mycount = len(mystr)
708                                                 while (mycount > 3):
709                                                         mycount -= 3
710                                                         mystr = mystr[:mycount] + "," + mystr[mycount:]
711                                                 file_size_str = mystr + " kB"
712
713                                         if self.verbose:
714                                                 if available:
715                                                         print "     ", darkgreen("Latest version available:"),myversion
716                                                 print "     ", self.getInstallationStatus(mycat+'/'+mypkg)
717                                                 if myebuild:
718                                                         print "      %s %s" % \
719                                                                 (darkgreen("Size of files:"), file_size_str)
720                                                 print "     ", darkgreen("Homepage:")+"     ",homepage
721                                                 print "     ", darkgreen("Description:")+"  ",desc
722                                                 print "     ", darkgreen("License:")+"      ",license
723                                                 print
724         #
725         # private interface
726         #
727         def getInstallationStatus(self,package):
728                 installed_package = self.vartree.dep_bestmatch(package)
729                 result = ""
730                 version = self.getVersion(installed_package,search.VERSION_RELEASE)
731                 if len(version) > 0:
732                         result = darkgreen("Latest version installed:")+" "+version
733                 else:
734                         result = darkgreen("Latest version installed:")+" [ Not Installed ]"
735                 return result
736
737         def getVersion(self,full_package,detail):
738                 if len(full_package) > 1:
739                         package_parts = portage.catpkgsplit(full_package)
740                         if detail == search.VERSION_RELEASE and package_parts[3] != 'r0':
741                                 result = package_parts[2]+ "-" + package_parts[3]
742                         else:
743                                 result = package_parts[2]
744                 else:
745                         result = ""
746                 return result
747
748 class RootConfig(object):
749         """This is used internally by depgraph to track information about a
750         particular $ROOT."""
751
752         pkg_tree_map = {
753                 "ebuild"    : "porttree",
754                 "binary"    : "bintree",
755                 "installed" : "vartree"
756         }
757
758         tree_pkg_map = {}
759         for k, v in pkg_tree_map.iteritems():
760                 tree_pkg_map[v] = k
761
762         def __init__(self, settings, trees, setconfig):
763                 self.trees = trees
764                 self.settings = settings
765                 self.iuse_implicit = tuple(sorted(settings._get_implicit_iuse()))
766                 self.root = self.settings["ROOT"]
767                 self.setconfig = setconfig
768                 self.sets = self.setconfig.getSets()
769                 self.visible_pkgs = PackageVirtualDbapi(self.settings)
770
771 def create_world_atom(pkg, args_set, root_config):
772         """Create a new atom for the world file if one does not exist.  If the
773         argument atom is precise enough to identify a specific slot then a slot
774         atom will be returned. Atoms that are in the system set may also be stored
775         in world since system atoms can only match one slot while world atoms can
776         be greedy with respect to slots.  Unslotted system packages will not be
777         stored in world."""
778
779         arg_atom = args_set.findAtomForPackage(pkg)
780         if not arg_atom:
781                 return None
782         cp = portage.dep_getkey(arg_atom)
783         new_world_atom = cp
784         sets = root_config.sets
785         portdb = root_config.trees["porttree"].dbapi
786         vardb = root_config.trees["vartree"].dbapi
787         available_slots = set(portdb.aux_get(cpv, ["SLOT"])[0] \
788                 for cpv in portdb.match(cp))
789         slotted = len(available_slots) > 1 or \
790                 (len(available_slots) == 1 and "0" not in available_slots)
791         if not slotted:
792                 # check the vdb in case this is multislot
793                 available_slots = set(vardb.aux_get(cpv, ["SLOT"])[0] \
794                         for cpv in vardb.match(cp))
795                 slotted = len(available_slots) > 1 or \
796                         (len(available_slots) == 1 and "0" not in available_slots)
797         if slotted and arg_atom != cp:
798                 # If the user gave a specific atom, store it as a
799                 # slot atom in the world file.
800                 slot_atom = pkg.slot_atom
801
802                 # For USE=multislot, there are a couple of cases to
803                 # handle here:
804                 #
805                 # 1) SLOT="0", but the real SLOT spontaneously changed to some
806                 #    unknown value, so just record an unslotted atom.
807                 #
808                 # 2) SLOT comes from an installed package and there is no
809                 #    matching SLOT in the portage tree.
810                 #
811                 # Make sure that the slot atom is available in either the
812                 # portdb or the vardb, since otherwise the user certainly
813                 # doesn't want the SLOT atom recorded in the world file
814                 # (case 1 above).  If it's only available in the vardb,
815                 # the user may be trying to prevent a USE=multislot
816                 # package from being removed by --depclean (case 2 above).
817
818                 mydb = portdb
819                 if not portdb.match(slot_atom):
820                         # SLOT seems to come from an installed multislot package
821                         mydb = vardb
822                 # If there is no installed package matching the SLOT atom,
823                 # it probably changed SLOT spontaneously due to USE=multislot,
824                 # so just record an unslotted atom.
825                 if vardb.match(slot_atom):
826                         # Now verify that the argument is precise
827                         # enough to identify a specific slot.
828                         matches = mydb.match(arg_atom)
829                         matched_slots = set()
830                         for cpv in matches:
831                                 matched_slots.add(mydb.aux_get(cpv, ["SLOT"])[0])
832                         if len(matched_slots) == 1:
833                                 new_world_atom = slot_atom
834
835         if new_world_atom == sets["world"].findAtomForPackage(pkg):
836                 # Both atoms would be identical, so there's nothing to add.
837                 return None
838         if not slotted:
839                 # Unlike world atoms, system atoms are not greedy for slots, so they
840                 # can't be safely excluded from world if they are slotted.
841                 system_atom = sets["system"].findAtomForPackage(pkg)
842                 if system_atom:
843                         if not portage.dep_getkey(system_atom).startswith("virtual/"):
844                                 return None
845                         # System virtuals aren't safe to exclude from world since they can
846                         # match multiple old-style virtuals but only one of them will be
847                         # pulled in by update or depclean.
848                         providers = portdb.mysettings.getvirtuals().get(
849                                 portage.dep_getkey(system_atom))
850                         if providers and len(providers) == 1 and providers[0] == cp:
851                                 return None
852         return new_world_atom
853
854 def filter_iuse_defaults(iuse):
855         for flag in iuse:
856                 if flag.startswith("+") or flag.startswith("-"):
857                         yield flag[1:]
858                 else:
859                         yield flag
860
861 class SlotObject(object):
862         __slots__ = ("__weakref__",)
863
864         def __init__(self, **kwargs):
865                 classes = [self.__class__]
866                 while classes:
867                         c = classes.pop()
868                         if c is SlotObject:
869                                 continue
870                         classes.extend(c.__bases__)
871                         slots = getattr(c, "__slots__", None)
872                         if not slots:
873                                 continue
874                         for myattr in slots:
875                                 myvalue = kwargs.get(myattr, None)
876                                 setattr(self, myattr, myvalue)
877
878         def copy(self):
879                 """
880                 Create a new instance and copy all attributes
881                 defined from __slots__ (including those from
882                 inherited classes).
883                 """
884                 obj = self.__class__()
885
886                 classes = [self.__class__]
887                 while classes:
888                         c = classes.pop()
889                         if c is SlotObject:
890                                 continue
891                         classes.extend(c.__bases__)
892                         slots = getattr(c, "__slots__", None)
893                         if not slots:
894                                 continue
895                         for myattr in slots:
896                                 setattr(obj, myattr, getattr(self, myattr))
897
898                 return obj
899
900 class AbstractDepPriority(SlotObject):
901         __slots__ = ("buildtime", "runtime", "runtime_post")
902
903         def __lt__(self, other):
904                 return self.__int__() < other
905
906         def __le__(self, other):
907                 return self.__int__() <= other
908
909         def __eq__(self, other):
910                 return self.__int__() == other
911
912         def __ne__(self, other):
913                 return self.__int__() != other
914
915         def __gt__(self, other):
916                 return self.__int__() > other
917
918         def __ge__(self, other):
919                 return self.__int__() >= other
920
921         def copy(self):
922                 import copy
923                 return copy.copy(self)
924
925 class DepPriority(AbstractDepPriority):
926         """
927                 This class generates an integer priority level based of various
928                 attributes of the dependency relationship.  Attributes can be assigned
929                 at any time and the new integer value will be generated on calls to the
930                 __int__() method.  Rich comparison operators are supported.
931
932                 The boolean attributes that affect the integer value are "satisfied",
933                 "buildtime", "runtime", and "system".  Various combinations of
934                 attributes lead to the following priority levels:
935
936                 Combination of properties           Priority  Category
937
938                 not satisfied and buildtime            0       HARD
939                 not satisfied and runtime             -1       MEDIUM
940                 not satisfied and runtime_post        -2       MEDIUM_SOFT
941                 satisfied and buildtime and rebuild   -3       SOFT
942                 satisfied and buildtime               -4       SOFT
943                 satisfied and runtime                 -5       SOFT
944                 satisfied and runtime_post            -6       SOFT
945                 (none of the above)                   -6       SOFT
946
947                 Several integer constants are defined for categorization of priority
948                 levels:
949
950                 MEDIUM   The upper boundary for medium dependencies.
951                 MEDIUM_SOFT   The upper boundary for medium-soft dependencies.
952                 SOFT     The upper boundary for soft dependencies.
953                 MIN      The lower boundary for soft dependencies.
954         """
955         __slots__ = ("satisfied", "rebuild")
956         MEDIUM = -1
957         MEDIUM_SOFT = -2
958         SOFT   = -3
959         MIN    = -6
960
961         def __int__(self):
962                 if not self.satisfied:
963                         if self.buildtime:
964                                 return 0
965                         if self.runtime:
966                                 return -1
967                         if self.runtime_post:
968                                 return -2
969                 if self.buildtime:
970                         if self.rebuild:
971                                 return -3
972                         return -4
973                 if self.runtime:
974                         return -5
975                 if self.runtime_post:
976                         return -6
977                 return -6
978
979         def __str__(self):
980                 myvalue = self.__int__()
981                 if myvalue > self.MEDIUM:
982                         return "hard"
983                 if myvalue > self.MEDIUM_SOFT:
984                         return "medium"
985                 if myvalue > self.SOFT:
986                         return "medium-soft"
987                 return "soft"
988
989 class BlockerDepPriority(DepPriority):
990         __slots__ = ()
991         def __int__(self):
992                 return 0
993
994 BlockerDepPriority.instance = BlockerDepPriority()
995
996 class UnmergeDepPriority(AbstractDepPriority):
997         __slots__ = ("satisfied",)
998         """
999         Combination of properties           Priority  Category
1000
1001         runtime                                0       HARD
1002         runtime_post                          -1       HARD
1003         buildtime                             -2       SOFT
1004         (none of the above)                   -2       SOFT
1005         """
1006
1007         MAX    =  0
1008         SOFT   = -2
1009         MIN    = -2
1010
1011         def __int__(self):
1012                 if self.runtime:
1013                         return 0
1014                 if self.runtime_post:
1015                         return -1
1016                 if self.buildtime:
1017                         return -2
1018                 return -2
1019
1020         def __str__(self):
1021                 myvalue = self.__int__()
1022                 if myvalue > self.SOFT:
1023                         return "hard"
1024                 return "soft"
1025
1026 class FakeVartree(portage.vartree):
1027         """This is implements an in-memory copy of a vartree instance that provides
1028         all the interfaces required for use by the depgraph.  The vardb is locked
1029         during the constructor call just long enough to read a copy of the
1030         installed package information.  This allows the depgraph to do it's
1031         dependency calculations without holding a lock on the vardb.  It also
1032         allows things like vardb global updates to be done in memory so that the
1033         user doesn't necessarily need write access to the vardb in cases where
1034         global updates are necessary (updates are performed when necessary if there
1035         is not a matching ebuild in the tree)."""
1036         def __init__(self, root_config, pkg_cache=None, acquire_lock=1):
1037                 self._root_config = root_config
1038                 if pkg_cache is None:
1039                         pkg_cache = {}
1040                 real_vartree = root_config.trees["vartree"]
1041                 portdb = root_config.trees["porttree"].dbapi
1042                 self.root = real_vartree.root
1043                 self.settings = real_vartree.settings
1044                 mykeys = list(real_vartree.dbapi._aux_cache_keys)
1045                 self._pkg_cache = pkg_cache
1046                 self.dbapi = PackageVirtualDbapi(real_vartree.settings)
1047                 vdb_path = os.path.join(self.root, portage.VDB_PATH)
1048                 try:
1049                         # At least the parent needs to exist for the lock file.
1050                         portage.util.ensure_dirs(vdb_path)
1051                 except portage.exception.PortageException:
1052                         pass
1053                 vdb_lock = None
1054                 try:
1055                         if acquire_lock and os.access(vdb_path, os.W_OK):
1056                                 vdb_lock = portage.locks.lockdir(vdb_path)
1057                         real_dbapi = real_vartree.dbapi
1058                         slot_counters = {}
1059                         for cpv in real_dbapi.cpv_all():
1060                                 cache_key = ("installed", self.root, cpv, "nomerge")
1061                                 pkg = self._pkg_cache.get(cache_key)
1062                                 if pkg is not None:
1063                                         metadata = pkg.metadata
1064                                 else:
1065                                         metadata = dict(izip(mykeys, real_dbapi.aux_get(cpv, mykeys)))
1066                                 myslot = metadata["SLOT"]
1067                                 mycp = portage.dep_getkey(cpv)
1068                                 myslot_atom = "%s:%s" % (mycp, myslot)
1069                                 try:
1070                                         mycounter = long(metadata["COUNTER"])
1071                                 except ValueError:
1072                                         mycounter = 0
1073                                         metadata["COUNTER"] = str(mycounter)
1074                                 other_counter = slot_counters.get(myslot_atom, None)
1075                                 if other_counter is not None:
1076                                         if other_counter > mycounter:
1077                                                 continue
1078                                 slot_counters[myslot_atom] = mycounter
1079                                 if pkg is None:
1080                                         pkg = Package(built=True, cpv=cpv,
1081                                                 installed=True, metadata=metadata,
1082                                                 root_config=root_config, type_name="installed")
1083                                 self._pkg_cache[pkg] = pkg
1084                                 self.dbapi.cpv_inject(pkg)
1085                         real_dbapi.flush_cache()
1086                 finally:
1087                         if vdb_lock:
1088                                 portage.locks.unlockdir(vdb_lock)
1089                 # Populate the old-style virtuals using the cached values.
1090                 if not self.settings.treeVirtuals:
1091                         self.settings.treeVirtuals = portage.util.map_dictlist_vals(
1092                                 portage.getCPFromCPV, self.get_all_provides())
1093
1094                 # Intialize variables needed for lazy cache pulls of the live ebuild
1095                 # metadata.  This ensures that the vardb lock is released ASAP, without
1096                 # being delayed in case cache generation is triggered.
1097                 self._aux_get = self.dbapi.aux_get
1098                 self.dbapi.aux_get = self._aux_get_wrapper
1099                 self._match = self.dbapi.match
1100                 self.dbapi.match = self._match_wrapper
1101                 self._aux_get_history = set()
1102                 self._portdb_keys = ["EAPI", "DEPEND", "RDEPEND", "PDEPEND"]
1103                 self._portdb = portdb
1104                 self._global_updates = None
1105
1106         def _match_wrapper(self, cpv, use_cache=1):
1107                 """
1108                 Make sure the metadata in Package instances gets updated for any
1109                 cpv that is returned from a match() call, since the metadata can
1110                 be accessed directly from the Package instance instead of via
1111                 aux_get().
1112                 """
1113                 matches = self._match(cpv, use_cache=use_cache)
1114                 for cpv in matches:
1115                         if cpv in self._aux_get_history:
1116                                 continue
1117                         self._aux_get_wrapper(cpv, [])
1118                 return matches
1119
1120         def _aux_get_wrapper(self, pkg, wants):
1121                 if pkg in self._aux_get_history:
1122                         return self._aux_get(pkg, wants)
1123                 self._aux_get_history.add(pkg)
1124                 try:
1125                         # Use the live ebuild metadata if possible.
1126                         live_metadata = dict(izip(self._portdb_keys,
1127                                 self._portdb.aux_get(pkg, self._portdb_keys)))
1128                         if not portage.eapi_is_supported(live_metadata["EAPI"]):
1129                                 raise KeyError(pkg)
1130                         self.dbapi.aux_update(pkg, live_metadata)
1131                 except (KeyError, portage.exception.PortageException):
1132                         if self._global_updates is None:
1133                                 self._global_updates = \
1134                                         grab_global_updates(self._portdb.porttree_root)
1135                         perform_global_updates(
1136                                 pkg, self.dbapi, self._global_updates)
1137                 return self._aux_get(pkg, wants)
1138
1139         def sync(self, acquire_lock=1):
1140                 """
1141                 Call this method to synchronize state with the real vardb
1142                 after one or more packages may have been installed or
1143                 uninstalled.
1144                 """
1145                 vdb_path = os.path.join(self.root, portage.VDB_PATH)
1146                 try:
1147                         # At least the parent needs to exist for the lock file.
1148                         portage.util.ensure_dirs(vdb_path)
1149                 except portage.exception.PortageException:
1150                         pass
1151                 vdb_lock = None
1152                 try:
1153                         if acquire_lock and os.access(vdb_path, os.W_OK):
1154                                 vdb_lock = portage.locks.lockdir(vdb_path)
1155                         self._sync()
1156                 finally:
1157                         if vdb_lock:
1158                                 portage.locks.unlockdir(vdb_lock)
1159
1160         def _sync(self):
1161
1162                 real_vardb = self._root_config.trees["vartree"].dbapi
1163                 current_cpv_set = frozenset(real_vardb.cpv_all())
1164                 pkg_vardb = self.dbapi
1165                 aux_get_history = self._aux_get_history
1166
1167                 # Remove any packages that have been uninstalled.
1168                 for pkg in list(pkg_vardb):
1169                         if pkg.cpv not in current_cpv_set:
1170                                 pkg_vardb.cpv_remove(pkg)
1171                                 aux_get_history.discard(pkg.cpv)
1172
1173                 # Validate counters and timestamps.
1174                 slot_counters = {}
1175                 root = self.root
1176                 validation_keys = ["COUNTER", "_mtime_"]
1177                 for cpv in current_cpv_set:
1178
1179                         pkg_hash_key = ("installed", root, cpv, "nomerge")
1180                         pkg = pkg_vardb.get(pkg_hash_key)
1181                         if pkg is not None:
1182                                 counter, mtime = real_vardb.aux_get(cpv, validation_keys)
1183
1184                                 if counter != pkg.metadata["COUNTER"] or \
1185                                         mtime != pkg.mtime:
1186                                         pkg_vardb.cpv_remove(pkg)
1187                                         aux_get_history.discard(pkg.cpv)
1188                                         pkg = None
1189
1190                         if pkg is None:
1191                                 pkg = self._pkg(cpv)
1192
1193                         other_counter = slot_counters.get(pkg.slot_atom)
1194                         if other_counter is not None:
1195                                 if other_counter > pkg.counter:
1196                                         continue
1197
1198                         slot_counters[pkg.slot_atom] = pkg.counter
1199                         pkg_vardb.cpv_inject(pkg)
1200
1201                 real_vardb.flush_cache()
1202
1203         def _pkg(self, cpv):
1204                 root_config = self._root_config
1205                 real_vardb = root_config.trees["vartree"].dbapi
1206                 db_keys = list(real_vardb._aux_cache_keys)
1207                 pkg = Package(cpv=cpv, installed=True,
1208                         metadata=izip(db_keys, real_vardb.aux_get(cpv, db_keys)),
1209                         root_config=root_config,
1210                         type_name="installed")
1211                 return pkg
1212
1213 def grab_global_updates(portdir):
1214         from portage.update import grab_updates, parse_updates
1215         updpath = os.path.join(portdir, "profiles", "updates")
1216         try:
1217                 rawupdates = grab_updates(updpath)
1218         except portage.exception.DirectoryNotFound:
1219                 rawupdates = []
1220         upd_commands = []
1221         for mykey, mystat, mycontent in rawupdates:
1222                 commands, errors = parse_updates(mycontent)
1223                 upd_commands.extend(commands)
1224         return upd_commands
1225
1226 def perform_global_updates(mycpv, mydb, mycommands):
1227         from portage.update import update_dbentries
1228         aux_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
1229         aux_dict = dict(izip(aux_keys, mydb.aux_get(mycpv, aux_keys)))
1230         updates = update_dbentries(mycommands, aux_dict)
1231         if updates:
1232                 mydb.aux_update(mycpv, updates)
1233
1234 def visible(pkgsettings, pkg):
1235         """
1236         Check if a package is visible. This can raise an InvalidDependString
1237         exception if LICENSE is invalid.
1238         TODO: optionally generate a list of masking reasons
1239         @rtype: Boolean
1240         @returns: True if the package is visible, False otherwise.
1241         """
1242         if not pkg.metadata["SLOT"]:
1243                 return False
1244         if pkg.built and not pkg.installed and "CHOST" in pkg.metadata:
1245                 if not pkgsettings._accept_chost(pkg):
1246                         return False
1247         eapi = pkg.metadata["EAPI"]
1248         if not portage.eapi_is_supported(eapi):
1249                 return False
1250         if not pkg.installed:
1251                 if portage._eapi_is_deprecated(eapi):
1252                         return False
1253                 if pkgsettings._getMissingKeywords(pkg.cpv, pkg.metadata):
1254                         return False
1255         if pkgsettings._getMaskAtom(pkg.cpv, pkg.metadata):
1256                 return False
1257         if pkgsettings._getProfileMaskAtom(pkg.cpv, pkg.metadata):
1258                 return False
1259         try:
1260                 if pkgsettings._getMissingLicenses(pkg.cpv, pkg.metadata):
1261                         return False
1262         except portage.exception.InvalidDependString:
1263                 return False
1264         return True
1265
1266 def get_masking_status(pkg, pkgsettings, root_config):
1267
1268         mreasons = portage.getmaskingstatus(
1269                 pkg, settings=pkgsettings,
1270                 portdb=root_config.trees["porttree"].dbapi)
1271
1272         if pkg.built and not pkg.installed and "CHOST" in pkg.metadata:
1273                 if not pkgsettings._accept_chost(pkg):
1274                         mreasons.append("CHOST: %s" % \
1275                                 pkg.metadata["CHOST"])
1276
1277         if not pkg.metadata["SLOT"]:
1278                 mreasons.append("invalid: SLOT is undefined")
1279
1280         return mreasons
1281
1282 def get_mask_info(root_config, cpv, pkgsettings,
1283         db, pkg_type, built, installed, db_keys):
1284         eapi_masked = False
1285         try:
1286                 metadata = dict(izip(db_keys,
1287                         db.aux_get(cpv, db_keys)))
1288         except KeyError:
1289                 metadata = None
1290         if metadata and not built:
1291                 pkgsettings.setcpv(cpv, mydb=metadata)
1292                 metadata["USE"] = pkgsettings["PORTAGE_USE"]
1293         if metadata is None:
1294                 mreasons = ["corruption"]
1295         else:
1296                 pkg = Package(type_name=pkg_type, root_config=root_config,
1297                         cpv=cpv, built=built, installed=installed, metadata=metadata)
1298                 mreasons = get_masking_status(pkg, pkgsettings, root_config)
1299         return metadata, mreasons
1300
1301 def show_masked_packages(masked_packages):
1302         shown_licenses = set()
1303         shown_comments = set()
1304         # Maybe there is both an ebuild and a binary. Only
1305         # show one of them to avoid redundant appearance.
1306         shown_cpvs = set()
1307         have_eapi_mask = False
1308         for (root_config, pkgsettings, cpv,
1309                 metadata, mreasons) in masked_packages:
1310                 if cpv in shown_cpvs:
1311                         continue
1312                 shown_cpvs.add(cpv)
1313                 comment, filename = None, None
1314                 if "package.mask" in mreasons:
1315                         comment, filename = \
1316                                 portage.getmaskingreason(
1317                                 cpv, metadata=metadata,
1318                                 settings=pkgsettings,
1319                                 portdb=root_config.trees["porttree"].dbapi,
1320                                 return_location=True)
1321                 missing_licenses = []
1322                 if metadata:
1323                         if not portage.eapi_is_supported(metadata["EAPI"]):
1324                                 have_eapi_mask = True
1325                         try:
1326                                 missing_licenses = \
1327                                         pkgsettings._getMissingLicenses(
1328                                                 cpv, metadata)
1329                         except portage.exception.InvalidDependString:
1330                                 # This will have already been reported
1331                                 # above via mreasons.
1332                                 pass
1333
1334                 print "- "+cpv+" (masked by: "+", ".join(mreasons)+")"
1335                 if comment and comment not in shown_comments:
1336                         print filename+":"
1337                         print comment
1338                         shown_comments.add(comment)
1339                 portdb = root_config.trees["porttree"].dbapi
1340                 for l in missing_licenses:
1341                         l_path = portdb.findLicensePath(l)
1342                         if l in shown_licenses:
1343                                 continue
1344                         msg = ("A copy of the '%s' license" + \
1345                         " is located at '%s'.") % (l, l_path)
1346                         print msg
1347                         print
1348                         shown_licenses.add(l)
1349         return have_eapi_mask
1350
1351 class Task(SlotObject):
1352         __slots__ = ("_hash_key", "_hash_value")
1353
1354         def _get_hash_key(self):
1355                 hash_key = getattr(self, "_hash_key", None)
1356                 if hash_key is None:
1357                         raise NotImplementedError(self)
1358                 return hash_key
1359
1360         def __eq__(self, other):
1361                 return self._get_hash_key() == other
1362
1363         def __ne__(self, other):
1364                 return self._get_hash_key() != other
1365
1366         def __hash__(self):
1367                 hash_value = getattr(self, "_hash_value", None)
1368                 if hash_value is None:
1369                         self._hash_value = hash(self._get_hash_key())
1370                 return self._hash_value
1371
1372         def __len__(self):
1373                 return len(self._get_hash_key())
1374
1375         def __getitem__(self, key):
1376                 return self._get_hash_key()[key]
1377
1378         def __iter__(self):
1379                 return iter(self._get_hash_key())
1380
1381         def __contains__(self, key):
1382                 return key in self._get_hash_key()
1383
1384         def __str__(self):
1385                 return str(self._get_hash_key())
1386
1387 class Blocker(Task):
1388
1389         __hash__ = Task.__hash__
1390         __slots__ = ("root", "atom", "cp", "eapi", "satisfied")
1391
1392         def __init__(self, **kwargs):
1393                 Task.__init__(self, **kwargs)
1394                 self.cp = portage.dep_getkey(self.atom)
1395
1396         def _get_hash_key(self):
1397                 hash_key = getattr(self, "_hash_key", None)
1398                 if hash_key is None:
1399                         self._hash_key = \
1400                                 ("blocks", self.root, self.atom, self.eapi)
1401                 return self._hash_key
1402
1403 class Package(Task):
1404
1405         __hash__ = Task.__hash__
1406         __slots__ = ("built", "cpv", "depth",
1407                 "installed", "metadata", "onlydeps", "operation",
1408                 "root_config", "type_name",
1409                 "category", "counter", "cp", "cpv_split",
1410                 "inherited", "iuse", "mtime",
1411                 "pf", "pv_split", "root", "slot", "slot_atom", "use")
1412
1413         metadata_keys = [
1414                 "CHOST", "COUNTER", "DEPEND", "EAPI",
1415                 "INHERITED", "IUSE", "KEYWORDS",
1416                 "LICENSE", "PDEPEND", "PROVIDE", "RDEPEND",
1417                 "repository", "PROPERTIES", "RESTRICT", "SLOT", "USE", "_mtime_"]
1418
1419         def __init__(self, **kwargs):
1420                 Task.__init__(self, **kwargs)
1421                 self.root = self.root_config.root
1422                 self.metadata = _PackageMetadataWrapper(self, self.metadata)
1423                 self.cp = portage.cpv_getkey(self.cpv)
1424                 self.slot_atom = portage.dep.Atom("%s:%s" % (self.cp, self.slot))
1425                 self.category, self.pf = portage.catsplit(self.cpv)
1426                 self.cpv_split = portage.catpkgsplit(self.cpv)
1427                 self.pv_split = self.cpv_split[1:]
1428
1429         class _use(object):
1430
1431                 __slots__ = ("__weakref__", "enabled")
1432
1433                 def __init__(self, use):
1434                         self.enabled = frozenset(use)
1435
1436         class _iuse(object):
1437
1438                 __slots__ = ("__weakref__", "all", "enabled", "disabled", "iuse_implicit", "regex", "tokens")
1439
1440                 def __init__(self, tokens, iuse_implicit):
1441                         self.tokens = tuple(tokens)
1442                         self.iuse_implicit = iuse_implicit
1443                         enabled = []
1444                         disabled = []
1445                         other = []
1446                         for x in tokens:
1447                                 prefix = x[:1]
1448                                 if prefix == "+":
1449                                         enabled.append(x[1:])
1450                                 elif prefix == "-":
1451                                         disabled.append(x[1:])
1452                                 else:
1453                                         other.append(x)
1454                         self.enabled = frozenset(enabled)
1455                         self.disabled = frozenset(disabled)
1456                         self.all = frozenset(chain(enabled, disabled, other))
1457
1458                 def __getattribute__(self, name):
1459                         if name == "regex":
1460                                 try:
1461                                         return object.__getattribute__(self, "regex")
1462                                 except AttributeError:
1463                                         all = object.__getattribute__(self, "all")
1464                                         iuse_implicit = object.__getattribute__(self, "iuse_implicit")
1465                                         # Escape anything except ".*" which is supposed
1466                                         # to pass through from _get_implicit_iuse()
1467                                         regex = (re.escape(x) for x in chain(all, iuse_implicit))
1468                                         regex = "^(%s)$" % "|".join(regex)
1469                                         regex = regex.replace("\\.\\*", ".*")
1470                                         self.regex = re.compile(regex)
1471                         return object.__getattribute__(self, name)
1472
1473         def _get_hash_key(self):
1474                 hash_key = getattr(self, "_hash_key", None)
1475                 if hash_key is None:
1476                         if self.operation is None:
1477                                 self.operation = "merge"
1478                                 if self.onlydeps or self.installed:
1479                                         self.operation = "nomerge"
1480                         self._hash_key = \
1481                                 (self.type_name, self.root, self.cpv, self.operation)
1482                 return self._hash_key
1483
1484         def __cmp__(self, other):
1485                 if self > other:
1486                         return 1
1487                 elif self < other:
1488                         return -1
1489                 return 0
1490
1491         def __lt__(self, other):
1492                 if other.cp != self.cp:
1493                         return False
1494                 if portage.pkgcmp(self.pv_split, other.pv_split) < 0:
1495                         return True
1496                 return False
1497
1498         def __le__(self, other):
1499                 if other.cp != self.cp:
1500                         return False
1501                 if portage.pkgcmp(self.pv_split, other.pv_split) <= 0:
1502                         return True
1503                 return False
1504
1505         def __gt__(self, other):
1506                 if other.cp != self.cp:
1507                         return False
1508                 if portage.pkgcmp(self.pv_split, other.pv_split) > 0:
1509                         return True
1510                 return False
1511
1512         def __ge__(self, other):
1513                 if other.cp != self.cp:
1514                         return False
1515                 if portage.pkgcmp(self.pv_split, other.pv_split) >= 0:
1516                         return True
1517                 return False
1518
1519 _all_metadata_keys = set(x for x in portage.auxdbkeys \
1520         if not x.startswith("UNUSED_"))
1521 _all_metadata_keys.discard("CDEPEND")
1522 _all_metadata_keys.update(Package.metadata_keys)
1523
1524 from portage.cache.mappings import slot_dict_class
1525 _PackageMetadataWrapperBase = slot_dict_class(_all_metadata_keys)
1526
1527 class _PackageMetadataWrapper(_PackageMetadataWrapperBase):
1528         """
1529         Detect metadata updates and synchronize Package attributes.
1530         """
1531
1532         __slots__ = ("_pkg",)
1533         _wrapped_keys = frozenset(
1534                 ["COUNTER", "INHERITED", "IUSE", "SLOT", "USE", "_mtime_"])
1535
1536         def __init__(self, pkg, metadata):
1537                 _PackageMetadataWrapperBase.__init__(self)
1538                 self._pkg = pkg
1539                 self.update(metadata)
1540
1541         def __setitem__(self, k, v):
1542                 _PackageMetadataWrapperBase.__setitem__(self, k, v)
1543                 if k in self._wrapped_keys:
1544                         getattr(self, "_set_" + k.lower())(k, v)
1545
1546         def _set_inherited(self, k, v):
1547                 if isinstance(v, basestring):
1548                         v = frozenset(v.split())
1549                 self._pkg.inherited = v
1550
1551         def _set_iuse(self, k, v):
1552                 self._pkg.iuse = self._pkg._iuse(
1553                         v.split(), self._pkg.root_config.iuse_implicit)
1554
1555         def _set_slot(self, k, v):
1556                 self._pkg.slot = v
1557
1558         def _set_use(self, k, v):
1559                 self._pkg.use = self._pkg._use(v.split())
1560
1561         def _set_counter(self, k, v):
1562                 if isinstance(v, basestring):
1563                         try:
1564                                 v = int(v.strip())
1565                         except ValueError:
1566                                 v = 0
1567                 self._pkg.counter = v
1568
1569         def _set__mtime_(self, k, v):
1570                 if isinstance(v, basestring):
1571                         try:
1572                                 v = float(v.strip())
1573                         except ValueError:
1574                                 v = 0
1575                 self._pkg.mtime = v
1576
1577 class EbuildFetchonly(SlotObject):
1578
1579         __slots__ = ("fetch_all", "pkg", "pretend", "settings")
1580
1581         def execute(self):
1582                 # To spawn pkg_nofetch requires PORTAGE_BUILDDIR for
1583                 # ensuring sane $PWD (bug #239560) and storing elog
1584                 # messages. Use a private temp directory, in order
1585                 # to avoid locking the main one.
1586                 settings = self.settings
1587                 global_tmpdir = settings["PORTAGE_TMPDIR"]
1588                 from tempfile import mkdtemp
1589                 private_tmpdir = mkdtemp("", "._portage_fetch_.", global_tmpdir)
1590                 settings["PORTAGE_TMPDIR"] = private_tmpdir
1591                 settings.backup_changes("PORTAGE_TMPDIR")
1592                 try:
1593                         retval = self._execute()
1594                 finally:
1595                         settings["PORTAGE_TMPDIR"] = global_tmpdir
1596                         settings.backup_changes("PORTAGE_TMPDIR")
1597                         shutil.rmtree(private_tmpdir)
1598                 return retval
1599
1600         def _execute(self):
1601                 settings = self.settings
1602                 pkg = self.pkg
1603                 root_config = pkg.root_config
1604                 portdb = root_config.trees["porttree"].dbapi
1605                 ebuild_path = portdb.findname(pkg.cpv)
1606                 settings.setcpv(pkg)
1607                 debug = settings.get("PORTAGE_DEBUG") == "1"
1608                 use_cache = 1 # always true
1609                 portage.doebuild_environment(ebuild_path, "fetch",
1610                         root_config.root, settings, debug, use_cache, portdb)
1611                 portage.prepare_build_dirs(self.pkg.root, self.settings, 0)
1612
1613                 retval = portage.doebuild(ebuild_path, "fetch",
1614                         self.settings["ROOT"], self.settings, debug=debug,
1615                         listonly=self.pretend, fetchonly=1, fetchall=self.fetch_all,
1616                         mydbapi=portdb, tree="porttree")
1617
1618                 if retval != os.EX_OK:
1619                         msg = "Fetch failed for '%s'" % (pkg.cpv,)
1620                         eerror(msg, phase="unpack", key=pkg.cpv)
1621
1622                 portage.elog.elog_process(self.pkg.cpv, self.settings)
1623                 return retval
1624
1625 class AsynchronousTask(SlotObject):
1626         """
1627         Subclasses override _wait() and _poll() so that calls
1628         to public methods can be wrapped for implementing
1629         hooks such as exit listener notification.
1630
1631         Sublasses should call self.wait() to notify exit listeners after
1632         the task is complete and self.returncode has been set.
1633         """
1634
1635         __slots__ = ("background", "cancelled", "returncode") + \
1636                 ("_exit_listeners", "_exit_listener_stack", "_start_listeners")
1637
1638         def start(self):
1639                 """
1640                 Start an asynchronous task and then return as soon as possible.
1641                 """
1642                 self._start()
1643                 self._start_hook()
1644
1645         def _start(self):
1646                 raise NotImplementedError(self)
1647
1648         def isAlive(self):
1649                 return self.returncode is None
1650
1651         def poll(self):
1652                 self._wait_hook()
1653                 return self._poll()
1654
1655         def _poll(self):
1656                 return self.returncode
1657
1658         def wait(self):
1659                 if self.returncode is None:
1660                         self._wait()
1661                 self._wait_hook()
1662                 return self.returncode
1663
1664         def _wait(self):
1665                 return self.returncode
1666
1667         def cancel(self):
1668                 self.cancelled = True
1669                 self.wait()
1670
1671         def addStartListener(self, f):
1672                 """
1673                 The function will be called with one argument, a reference to self.
1674                 """
1675                 if self._start_listeners is None:
1676                         self._start_listeners = []
1677                 self._start_listeners.append(f)
1678
1679         def removeStartListener(self, f):
1680                 if self._start_listeners is None:
1681                         return
1682                 self._start_listeners.remove(f)
1683
1684         def _start_hook(self):
1685                 if self._start_listeners is not None:
1686                         start_listeners = self._start_listeners
1687                         self._start_listeners = None
1688
1689                         for f in start_listeners:
1690                                 f(self)
1691
1692         def addExitListener(self, f):
1693                 """
1694                 The function will be called with one argument, a reference to self.
1695                 """
1696                 if self._exit_listeners is None:
1697                         self._exit_listeners = []
1698                 self._exit_listeners.append(f)
1699
1700         def removeExitListener(self, f):
1701                 if self._exit_listeners is None:
1702                         if self._exit_listener_stack is not None:
1703                                 self._exit_listener_stack.remove(f)
1704                         return
1705                 self._exit_listeners.remove(f)
1706
1707         def _wait_hook(self):
1708                 """
1709                 Call this method after the task completes, just before returning
1710                 the returncode from wait() or poll(). This hook is
1711                 used to trigger exit listeners when the returncode first
1712                 becomes available.
1713                 """
1714                 if self.returncode is not None and \
1715                         self._exit_listeners is not None:
1716
1717                         # This prevents recursion, in case one of the
1718                         # exit handlers triggers this method again by
1719                         # calling wait(). Use a stack that gives
1720                         # removeExitListener() an opportunity to consume
1721                         # listeners from the stack, before they can get
1722                         # called below. This is necessary because a call
1723                         # to one exit listener may result in a call to
1724                         # removeExitListener() for another listener on
1725                         # the stack. That listener needs to be removed
1726                         # from the stack since it would be inconsistent
1727                         # to call it after it has been been passed into
1728                         # removeExitListener().
1729                         self._exit_listener_stack = self._exit_listeners
1730                         self._exit_listeners = None
1731
1732                         self._exit_listener_stack.reverse()
1733                         while self._exit_listener_stack:
1734                                 self._exit_listener_stack.pop()(self)
1735
1736 class PipeReader(AsynchronousTask):
1737
1738         """
1739         Reads output from one or more files and saves it in memory,
1740         for retrieval via the getvalue() method. This is driven by
1741         the scheduler's poll() loop, so it runs entirely within the
1742         current process.
1743         """
1744
1745         __slots__ = ("input_files", "scheduler",) + \
1746                 ("pid", "_read_data", "_registered", "_reg_ids")
1747
1748         _bufsize = 4096
1749
1750         def _start(self):
1751                 self._reg_ids = set()
1752                 self._read_data = []
1753                 for k, f in self.input_files.iteritems():
1754                         fcntl.fcntl(f.fileno(), fcntl.F_SETFL,
1755                                 fcntl.fcntl(f.fileno(), fcntl.F_GETFL) | os.O_NONBLOCK)
1756                         self._reg_ids.add(self.scheduler.register(f.fileno(),
1757                                 PollConstants.POLLIN, self._output_handler))
1758                 self._registered = True
1759
1760         def isAlive(self):
1761                 return self._registered
1762
1763         def _wait(self):
1764                 if self.returncode is not None:
1765                         return self.returncode
1766
1767                 if self._registered:
1768                         self.scheduler.schedule(self._reg_ids)
1769                         self._unregister()
1770
1771                 self.returncode = os.EX_OK
1772                 return self.returncode
1773
1774         def getvalue(self):
1775                 """Retrieve the entire contents"""
1776                 return "".join(self._read_data)
1777
1778         def close(self):
1779                 """Free the memory buffer."""
1780                 self._read_data = None
1781
1782         def _output_handler(self, fd, event):
1783                 files = self.input_files
1784                 for f in files.itervalues():
1785                         if fd == f.fileno():
1786                                 break
1787
1788                 buf = array.array('B')
1789                 try:
1790                         buf.fromfile(f, self._bufsize)
1791                 except EOFError:
1792                         pass
1793
1794                 if buf:
1795                         self._read_data.append(buf.tostring())
1796                 else:
1797                         self._unregister()
1798                         self.wait()
1799
1800                 return self._registered
1801
1802         def _unregister(self):
1803                 """
1804                 Unregister from the scheduler and close open files.
1805                 """
1806
1807                 self._registered = False
1808
1809                 if self._reg_ids is not None:
1810                         for reg_id in self._reg_ids:
1811                                 self.scheduler.unregister(reg_id)
1812                         self._reg_ids = None
1813
1814                 if self.input_files is not None:
1815                         for f in self.input_files.itervalues():
1816                                 f.close()
1817                         self.input_files = None
1818
1819 class CompositeTask(AsynchronousTask):
1820
1821         __slots__ = ("scheduler",) + ("_current_task",)
1822
1823         def isAlive(self):
1824                 return self._current_task is not None
1825
1826         def cancel(self):
1827                 self.cancelled = True
1828                 if self._current_task is not None:
1829                         self._current_task.cancel()
1830
1831         def _poll(self):
1832                 """
1833                 This does a loop calling self._current_task.poll()
1834                 repeatedly as long as the value of self._current_task
1835                 keeps changing. It calls poll() a maximum of one time
1836                 for a given self._current_task instance. This is useful
1837                 since calling poll() on a task can trigger advance to
1838                 the next task could eventually lead to the returncode
1839                 being set in cases when polling only a single task would
1840                 not have the same effect.
1841                 """
1842
1843                 prev = None
1844                 while True:
1845                         task = self._current_task
1846                         if task is None or task is prev:
1847                                 # don't poll the same task more than once
1848                                 break
1849                         task.poll()
1850                         prev = task
1851
1852                 return self.returncode
1853
1854         def _wait(self):
1855
1856                 prev = None
1857                 while True:
1858                         task = self._current_task
1859                         if task is None:
1860                                 # don't wait for the same task more than once
1861                                 break
1862                         if task is prev:
1863                                 # Before the task.wait() method returned, an exit
1864                                 # listener should have set self._current_task to either
1865                                 # a different task or None. Something is wrong.
1866                                 raise AssertionError("self._current_task has not " + \
1867                                         "changed since calling wait", self, task)
1868                         task.wait()
1869                         prev = task
1870
1871                 return self.returncode
1872
1873         def _assert_current(self, task):
1874                 """
1875                 Raises an AssertionError if the given task is not the
1876                 same one as self._current_task. This can be useful
1877                 for detecting bugs.
1878                 """
1879                 if task is not self._current_task:
1880                         raise AssertionError("Unrecognized task: %s" % (task,))
1881
1882         def _default_exit(self, task):
1883                 """
1884                 Calls _assert_current() on the given task and then sets the
1885                 composite returncode attribute if task.returncode != os.EX_OK.
1886                 If the task failed then self._current_task will be set to None.
1887                 Subclasses can use this as a generic task exit callback.
1888
1889                 @rtype: int
1890                 @returns: The task.returncode attribute.
1891                 """
1892                 self._assert_current(task)
1893                 if task.returncode != os.EX_OK:
1894                         self.returncode = task.returncode
1895                         self._current_task = None
1896                 return task.returncode
1897
1898         def _final_exit(self, task):
1899                 """
1900                 Assumes that task is the final task of this composite task.
1901                 Calls _default_exit() and sets self.returncode to the task's
1902                 returncode and sets self._current_task to None.
1903                 """
1904                 self._default_exit(task)
1905                 self._current_task = None
1906                 self.returncode = task.returncode
1907                 return self.returncode
1908
1909         def _default_final_exit(self, task):
1910                 """
1911                 This calls _final_exit() and then wait().
1912
1913                 Subclasses can use this as a generic final task exit callback.
1914
1915                 """
1916                 self._final_exit(task)
1917                 return self.wait()
1918
1919         def _start_task(self, task, exit_handler):
1920                 """
1921                 Register exit handler for the given task, set it
1922                 as self._current_task, and call task.start().
1923
1924                 Subclasses can use this as a generic way to start
1925                 a task.
1926
1927                 """
1928                 task.addExitListener(exit_handler)
1929                 self._current_task = task
1930                 task.start()
1931
1932 class TaskSequence(CompositeTask):
1933         """
1934         A collection of tasks that executes sequentially. Each task
1935         must have a addExitListener() method that can be used as
1936         a means to trigger movement from one task to the next.
1937         """
1938
1939         __slots__ = ("_task_queue",)
1940
1941         def __init__(self, **kwargs):
1942                 AsynchronousTask.__init__(self, **kwargs)
1943                 self._task_queue = deque()
1944
1945         def add(self, task):
1946                 self._task_queue.append(task)
1947
1948         def _start(self):
1949                 self._start_next_task()
1950
1951         def cancel(self):
1952                 self._task_queue.clear()
1953                 CompositeTask.cancel(self)
1954
1955         def _start_next_task(self):
1956                 self._start_task(self._task_queue.popleft(),
1957                         self._task_exit_handler)
1958
1959         def _task_exit_handler(self, task):
1960                 if self._default_exit(task) != os.EX_OK:
1961                         self.wait()
1962                 elif self._task_queue:
1963                         self._start_next_task()
1964                 else:
1965                         self._final_exit(task)
1966                         self.wait()
1967
1968 class SubProcess(AsynchronousTask):
1969
1970         __slots__ = ("scheduler",) + ("pid", "_files", "_registered", "_reg_id")
1971
1972         # A file descriptor is required for the scheduler to monitor changes from
1973         # inside a poll() loop. When logging is not enabled, create a pipe just to
1974         # serve this purpose alone.
1975         _dummy_pipe_fd = 9
1976
1977         def _poll(self):
1978                 if self.returncode is not None:
1979                         return self.returncode
1980                 if self.pid is None:
1981                         return self.returncode
1982                 if self._registered:
1983                         return self.returncode
1984
1985                 try:
1986                         retval = os.waitpid(self.pid, os.WNOHANG)
1987                 except OSError, e:
1988                         if e.errno != errno.ECHILD:
1989                                 raise
1990                         del e
1991                         retval = (self.pid, 1)
1992
1993                 if retval == (0, 0):
1994                         return None
1995                 self._set_returncode(retval)
1996                 return self.returncode
1997
1998         def cancel(self):
1999                 if self.isAlive():
2000                         try:
2001                                 os.kill(self.pid, signal.SIGTERM)
2002                         except OSError, e:
2003                                 if e.errno != errno.ESRCH:
2004                                         raise
2005                                 del e
2006
2007                 self.cancelled = True
2008                 if self.pid is not None:
2009                         self.wait()
2010                 return self.returncode
2011
2012         def isAlive(self):
2013                 return self.pid is not None and \
2014                         self.returncode is None
2015
2016         def _wait(self):
2017
2018                 if self.returncode is not None:
2019                         return self.returncode
2020
2021                 if self._registered:
2022                         self.scheduler.schedule(self._reg_id)
2023                         self._unregister()
2024                         if self.returncode is not None:
2025                                 return self.returncode
2026
2027                 try:
2028                         wait_retval = os.waitpid(self.pid, 0)
2029                 except OSError, e:
2030                         if e.errno != errno.ECHILD:
2031                                 raise
2032                         del e
2033                         self._set_returncode((self.pid, 1))
2034                 else:
2035                         self._set_returncode(wait_retval)
2036
2037                 return self.returncode
2038
2039         def _unregister(self):
2040                 """
2041                 Unregister from the scheduler and close open files.
2042                 """
2043
2044                 self._registered = False
2045
2046                 if self._reg_id is not None:
2047                         self.scheduler.unregister(self._reg_id)
2048                         self._reg_id = None
2049
2050                 if self._files is not None:
2051                         for f in self._files.itervalues():
2052                                 f.close()
2053                         self._files = None
2054
2055         def _set_returncode(self, wait_retval):
2056
2057                 retval = wait_retval[1]
2058
2059                 if retval != os.EX_OK:
2060                         if retval & 0xff:
2061                                 retval = (retval & 0xff) << 8
2062                         else:
2063                                 retval = retval >> 8
2064
2065                 self.returncode = retval
2066
2067 class SpawnProcess(SubProcess):
2068
2069         """
2070         Constructor keyword args are passed into portage.process.spawn().
2071         The required "args" keyword argument will be passed as the first
2072         spawn() argument.
2073         """
2074
2075         _spawn_kwarg_names = ("env", "opt_name", "fd_pipes",
2076                 "uid", "gid", "groups", "umask", "logfile",
2077                 "path_lookup", "pre_exec")
2078
2079         __slots__ = ("args",) + \
2080                 _spawn_kwarg_names
2081
2082         _file_names = ("log", "process", "stdout")
2083         _files_dict = slot_dict_class(_file_names, prefix="")
2084         _bufsize = 4096
2085
2086         def _start(self):
2087
2088                 if self.cancelled:
2089                         return
2090
2091                 if self.fd_pipes is None:
2092                         self.fd_pipes = {}
2093                 fd_pipes = self.fd_pipes
2094                 fd_pipes.setdefault(0, sys.stdin.fileno())
2095                 fd_pipes.setdefault(1, sys.stdout.fileno())
2096                 fd_pipes.setdefault(2, sys.stderr.fileno())
2097
2098                 # flush any pending output
2099                 for fd in fd_pipes.itervalues():
2100                         if fd == sys.stdout.fileno():
2101                                 sys.stdout.flush()
2102                         if fd == sys.stderr.fileno():
2103                                 sys.stderr.flush()
2104
2105                 logfile = self.logfile
2106                 self._files = self._files_dict()
2107                 files = self._files
2108
2109                 master_fd, slave_fd = self._pipe(fd_pipes)
2110                 fcntl.fcntl(master_fd, fcntl.F_SETFL,
2111                         fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
2112
2113                 null_input = None
2114                 fd_pipes_orig = fd_pipes.copy()
2115                 if self.background:
2116                         # TODO: Use job control functions like tcsetpgrp() to control
2117                         # access to stdin. Until then, use /dev/null so that any
2118                         # attempts to read from stdin will immediately return EOF
2119                         # instead of blocking indefinitely.
2120                         null_input = open('/dev/null', 'rb')
2121                         fd_pipes[0] = null_input.fileno()
2122                 else:
2123                         fd_pipes[0] = fd_pipes_orig[0]
2124
2125                 files.process = os.fdopen(master_fd, 'r')
2126                 if logfile is not None:
2127
2128                         fd_pipes[1] = slave_fd
2129                         fd_pipes[2] = slave_fd
2130
2131                         files.log = open(logfile, "a")
2132                         portage.util.apply_secpass_permissions(logfile,
2133                                 uid=portage.portage_uid, gid=portage.portage_gid,
2134                                 mode=0660)
2135
2136                         if not self.background:
2137                                 files.stdout = os.fdopen(os.dup(fd_pipes_orig[1]), 'w')
2138
2139                         output_handler = self._output_handler
2140
2141                 else:
2142
2143                         # Create a dummy pipe so the scheduler can monitor
2144                         # the process from inside a poll() loop.
2145                         fd_pipes[self._dummy_pipe_fd] = slave_fd
2146                         if self.background:
2147                                 fd_pipes[1] = slave_fd
2148                                 fd_pipes[2] = slave_fd
2149                         output_handler = self._dummy_handler
2150
2151                 kwargs = {}
2152                 for k in self._spawn_kwarg_names:
2153                         v = getattr(self, k)
2154                         if v is not None:
2155                                 kwargs[k] = v
2156
2157                 kwargs["fd_pipes"] = fd_pipes
2158                 kwargs["returnpid"] = True
2159                 kwargs.pop("logfile", None)
2160
2161                 retval = self._spawn(self.args, **kwargs)
2162
2163                 os.close(slave_fd)
2164                 if null_input is not None:
2165                         null_input.close()
2166
2167                 if isinstance(retval, int):
2168                         # spawn failed
2169                         for f in files.values():
2170                                 f.close()
2171                         self.returncode = retval
2172                         self.wait()
2173                         return
2174
2175                 self.pid = retval[0]
2176                 portage.process.spawned_pids.remove(self.pid)
2177
2178                 self._reg_id = self.scheduler.register(files.process.fileno(),
2179                         PollConstants.POLLIN, output_handler)
2180                 self._registered = True
2181
2182         def _pipe(self, fd_pipes):
2183                 """
2184                 @type fd_pipes: dict
2185                 @param fd_pipes: pipes from which to copy terminal size if desired.
2186                 """
2187                 return os.pipe()
2188
2189         def _spawn(self, args, **kwargs):
2190                 return portage.process.spawn(args, **kwargs)
2191
2192         def _output_handler(self, fd, event):
2193                 files = self._files
2194                 buf = array.array('B')
2195                 try:
2196                         buf.fromfile(files.process, self._bufsize)
2197                 except EOFError:
2198                         pass
2199                 if buf:
2200                         if not self.background:
2201                                 buf.tofile(files.stdout)
2202                                 files.stdout.flush()
2203                         buf.tofile(files.log)
2204                         files.log.flush()
2205                 else:
2206                         self._unregister()
2207                         self.wait()
2208                 return self._registered
2209
2210         def _dummy_handler(self, fd, event):
2211                 """
2212                 This method is mainly interested in detecting EOF, since
2213                 the only purpose of the pipe is to allow the scheduler to
2214                 monitor the process from inside a poll() loop.
2215                 """
2216                 files = self._files
2217                 buf = array.array('B')
2218                 try:
2219                         buf.fromfile(files.process, self._bufsize)
2220                 except EOFError:
2221                         pass
2222                 if buf:
2223                         pass
2224                 else:
2225                         self._unregister()
2226                         self.wait()
2227                 return self._registered
2228
2229 class MiscFunctionsProcess(SpawnProcess):
2230         """
2231         Spawns misc-functions.sh with an existing ebuild environment.
2232         """
2233
2234         __slots__ = ("commands", "phase", "pkg", "settings")
2235
2236         def _start(self):
2237                 settings = self.settings
2238                 settings.pop("EBUILD_PHASE", None)
2239                 portage_bin_path = settings["PORTAGE_BIN_PATH"]
2240                 misc_sh_binary = os.path.join(portage_bin_path,
2241                         os.path.basename(portage.const.MISC_SH_BINARY))
2242
2243                 self.args = [portage._shell_quote(misc_sh_binary)] + self.commands
2244                 self.logfile = settings.get("PORTAGE_LOG_FILE")
2245
2246                 portage._doebuild_exit_status_unlink(
2247                         settings.get("EBUILD_EXIT_STATUS_FILE"))
2248
2249                 SpawnProcess._start(self)
2250
2251         def _spawn(self, args, **kwargs):
2252                 settings = self.settings
2253                 debug = settings.get("PORTAGE_DEBUG") == "1"
2254                 return portage.spawn(" ".join(args), settings,
2255                         debug=debug, **kwargs)
2256
2257         def _set_returncode(self, wait_retval):
2258                 SpawnProcess._set_returncode(self, wait_retval)
2259                 self.returncode = portage._doebuild_exit_status_check_and_log(
2260                         self.settings, self.phase, self.returncode)
2261
2262 class EbuildFetcher(SpawnProcess):
2263
2264         __slots__ = ("config_pool", "fetchonly", "fetchall", "pkg", "prefetch") + \
2265                 ("_build_dir",)
2266
2267         def _start(self):
2268
2269                 root_config = self.pkg.root_config
2270                 portdb = root_config.trees["porttree"].dbapi
2271                 ebuild_path = portdb.findname(self.pkg.cpv)
2272                 settings = self.config_pool.allocate()
2273                 self._build_dir = EbuildBuildDir(pkg=self.pkg, settings=settings)
2274                 self._build_dir.lock()
2275                 self._build_dir.clean()
2276                 portage.prepare_build_dirs(self.pkg.root, self._build_dir.settings, 0)
2277                 if self.logfile is None:
2278                         self.logfile = settings.get("PORTAGE_LOG_FILE")
2279
2280                 phase = "fetch"
2281                 if self.fetchall:
2282                         phase = "fetchall"
2283
2284                 # If any incremental variables have been overridden
2285                 # via the environment, those values need to be passed
2286                 # along here so that they are correctly considered by
2287                 # the config instance in the subproccess.
2288                 fetch_env = os.environ.copy()
2289
2290                 fetch_env["PORTAGE_NICENESS"] = "0"
2291                 if self.prefetch:
2292                         fetch_env["PORTAGE_PARALLEL_FETCHONLY"] = "1"
2293
2294                 ebuild_binary = os.path.join(
2295                         settings["PORTAGE_BIN_PATH"], "ebuild")
2296
2297                 fetch_args = [ebuild_binary, ebuild_path, phase]
2298                 debug = settings.get("PORTAGE_DEBUG") == "1"
2299                 if debug:
2300                         fetch_args.append("--debug")
2301
2302                 self.args = fetch_args
2303                 self.env = fetch_env
2304                 SpawnProcess._start(self)
2305
2306         def _pipe(self, fd_pipes):
2307                 """When appropriate, use a pty so that fetcher progress bars,
2308                 like wget has, will work properly."""
2309                 if self.background or not sys.stdout.isatty():
2310                         # When the output only goes to a log file,
2311                         # there's no point in creating a pty.
2312                         return os.pipe()
2313                 stdout_pipe = fd_pipes.get(1)
2314                 got_pty, master_fd, slave_fd = \
2315                         portage._create_pty_or_pipe(copy_term_size=stdout_pipe)
2316                 return (master_fd, slave_fd)
2317
2318         def _set_returncode(self, wait_retval):
2319                 SpawnProcess._set_returncode(self, wait_retval)
2320                 # Collect elog messages that might have been
2321                 # created by the pkg_nofetch phase.
2322                 if self._build_dir is not None:
2323                         # Skip elog messages for prefetch, in order to avoid duplicates.
2324                         if not self.prefetch and self.returncode != os.EX_OK:
2325                                 elog_out = None
2326                                 if self.logfile is not None:
2327                                         if self.background:
2328                                                 elog_out = open(self.logfile, 'a')
2329                                 msg = "Fetch failed for '%s'" % (self.pkg.cpv,)
2330                                 if self.logfile is not None:
2331                                         msg += ", Log file:"
2332                                 eerror(msg, phase="unpack", key=self.pkg.cpv, out=elog_out)
2333                                 if self.logfile is not None:
2334                                         eerror(" '%s'" % (self.logfile,),
2335                                                 phase="unpack", key=self.pkg.cpv, out=elog_out)
2336                                 if elog_out is not None:
2337                                         elog_out.close()
2338                         if not self.prefetch:
2339                                 portage.elog.elog_process(self.pkg.cpv, self._build_dir.settings)
2340                         features = self._build_dir.settings.features
2341                         if self.returncode == os.EX_OK:
2342                                 self._build_dir.clean()
2343                         self._build_dir.unlock()
2344                         self.config_pool.deallocate(self._build_dir.settings)
2345                         self._build_dir = None
2346
2347 class EbuildBuildDir(SlotObject):
2348
2349         __slots__ = ("dir_path", "pkg", "settings",
2350                 "locked", "_catdir", "_lock_obj")
2351
2352         def __init__(self, **kwargs):
2353                 SlotObject.__init__(self, **kwargs)
2354                 self.locked = False
2355
2356         def lock(self):
2357                 """
2358                 This raises an AlreadyLocked exception if lock() is called
2359                 while a lock is already held. In order to avoid this, call
2360                 unlock() or check whether the "locked" attribute is True
2361                 or False before calling lock().
2362                 """
2363                 if self._lock_obj is not None:
2364                         raise self.AlreadyLocked((self._lock_obj,))
2365
2366                 dir_path = self.dir_path
2367                 if dir_path is None:
2368                         root_config = self.pkg.root_config
2369                         portdb = root_config.trees["porttree"].dbapi
2370                         ebuild_path = portdb.findname(self.pkg.cpv)
2371                         settings = self.settings
2372                         settings.setcpv(self.pkg)
2373                         debug = settings.get("PORTAGE_DEBUG") == "1"
2374                         use_cache = 1 # always true
2375                         portage.doebuild_environment(ebuild_path, "setup", root_config.root,
2376                                 self.settings, debug, use_cache, portdb)
2377                         dir_path = self.settings["PORTAGE_BUILDDIR"]
2378
2379                 catdir = os.path.dirname(dir_path)
2380                 self._catdir = catdir
2381
2382                 portage.util.ensure_dirs(os.path.dirname(catdir),
2383                         gid=portage.portage_gid,
2384                         mode=070, mask=0)
2385                 catdir_lock = None
2386                 try:
2387                         catdir_lock = portage.locks.lockdir(catdir)
2388                         portage.util.ensure_dirs(catdir,
2389                                 gid=portage.portage_gid,
2390                                 mode=070, mask=0)
2391                         self._lock_obj = portage.locks.lockdir(dir_path)
2392                 finally:
2393                         self.locked = self._lock_obj is not None
2394                         if catdir_lock is not None:
2395                                 portage.locks.unlockdir(catdir_lock)
2396
2397         def clean(self):
2398                 """Uses shutil.rmtree() rather than spawning a 'clean' phase. Disabled
2399                 by keepwork or keeptemp in FEATURES."""
2400                 settings = self.settings
2401                 features = settings.features
2402                 if not ("keepwork" in features or "keeptemp" in features):
2403                         try:
2404                                 shutil.rmtree(settings["PORTAGE_BUILDDIR"])
2405                         except EnvironmentError, e:
2406                                 if e.errno != errno.ENOENT:
2407                                         raise
2408                                 del e
2409
2410         def unlock(self):
2411                 if self._lock_obj is None:
2412                         return
2413
2414                 portage.locks.unlockdir(self._lock_obj)
2415                 self._lock_obj = None
2416                 self.locked = False
2417
2418                 catdir = self._catdir
2419                 catdir_lock = None
2420                 try:
2421                         catdir_lock = portage.locks.lockdir(catdir)
2422                 finally:
2423                         if catdir_lock:
2424                                 try:
2425                                         os.rmdir(catdir)
2426                                 except OSError, e:
2427                                         if e.errno not in (errno.ENOENT,
2428                                                 errno.ENOTEMPTY, errno.EEXIST):
2429                                                 raise
2430                                         del e
2431                                 portage.locks.unlockdir(catdir_lock)
2432
2433         class AlreadyLocked(portage.exception.PortageException):
2434                 pass
2435
2436 class EbuildBuild(CompositeTask):
2437
2438         __slots__ = ("args_set", "config_pool", "find_blockers",
2439                 "ldpath_mtimes", "logger", "opts", "pkg", "pkg_count",
2440                 "prefetcher", "settings", "world_atom") + \
2441                 ("_build_dir", "_buildpkg", "_ebuild_path", "_issyspkg", "_tree")
2442
2443         def _start(self):
2444
2445                 logger = self.logger
2446                 opts = self.opts
2447                 pkg = self.pkg
2448                 settings = self.settings
2449                 world_atom = self.world_atom
2450                 root_config = pkg.root_config
2451                 tree = "porttree"
2452                 self._tree = tree
2453                 portdb = root_config.trees[tree].dbapi
2454                 settings["EMERGE_FROM"] = pkg.type_name
2455                 settings.backup_changes("EMERGE_FROM")
2456                 settings.reset()
2457                 ebuild_path = portdb.findname(self.pkg.cpv)
2458                 self._ebuild_path = ebuild_path
2459
2460                 prefetcher = self.prefetcher
2461                 if prefetcher is None:
2462                         pass
2463                 elif not prefetcher.isAlive():
2464                         prefetcher.cancel()
2465                 elif prefetcher.poll() is None:
2466
2467                         waiting_msg = "Fetching files " + \
2468                                 "in the background. " + \
2469                                 "To view fetch progress, run `tail -f " + \
2470                                 "/var/log/emerge-fetch.log` in another " + \
2471                                 "terminal."
2472                         msg_prefix = colorize("GOOD", " * ")
2473                         from textwrap import wrap
2474                         waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
2475                                 for line in wrap(waiting_msg, 65))
2476                         if not self.background:
2477                                 writemsg(waiting_msg, noiselevel=-1)
2478
2479                         self._current_task = prefetcher
2480                         prefetcher.addExitListener(self._prefetch_exit)
2481                         return
2482
2483                 self._prefetch_exit(prefetcher)
2484
2485         def _prefetch_exit(self, prefetcher):
2486
2487                 opts = self.opts
2488                 pkg = self.pkg
2489                 settings = self.settings
2490
2491                 if opts.fetchonly:
2492                                 fetcher = EbuildFetchonly(
2493                                         fetch_all=opts.fetch_all_uri,
2494                                         pkg=pkg, pretend=opts.pretend,
2495                                         settings=settings)
2496                                 retval = fetcher.execute()
2497                                 self.returncode = retval
2498                                 self.wait()
2499                                 return
2500
2501                 fetcher = EbuildFetcher(config_pool=self.config_pool,
2502                         fetchall=opts.fetch_all_uri,
2503                         fetchonly=opts.fetchonly,
2504                         background=self.background,
2505                         pkg=pkg, scheduler=self.scheduler)
2506
2507                 self._start_task(fetcher, self._fetch_exit)
2508
2509         def _fetch_exit(self, fetcher):
2510                 opts = self.opts
2511                 pkg = self.pkg
2512
2513                 fetch_failed = False
2514                 if opts.fetchonly:
2515                         fetch_failed = self._final_exit(fetcher) != os.EX_OK
2516                 else:
2517                         fetch_failed = self._default_exit(fetcher) != os.EX_OK
2518
2519                 if fetch_failed and fetcher.logfile is not None and \
2520                         os.path.exists(fetcher.logfile):
2521                         self.settings["PORTAGE_LOG_FILE"] = fetcher.logfile
2522
2523                 if not fetch_failed and fetcher.logfile is not None:
2524                         # Fetch was successful, so remove the fetch log.
2525                         try:
2526                                 os.unlink(fetcher.logfile)
2527                         except OSError:
2528                                 pass
2529
2530                 if fetch_failed or opts.fetchonly:
2531                         self.wait()
2532                         return
2533
2534                 logger = self.logger
2535                 opts = self.opts
2536                 pkg_count = self.pkg_count
2537                 scheduler = self.scheduler
2538                 settings = self.settings
2539                 features = settings.features
2540                 ebuild_path = self._ebuild_path
2541                 system_set = pkg.root_config.sets["system"]
2542
2543                 self._build_dir = EbuildBuildDir(pkg=pkg, settings=settings)
2544                 self._build_dir.lock()
2545
2546                 # Cleaning is triggered before the setup
2547                 # phase, in portage.doebuild().
2548                 msg = " === (%s of %s) Cleaning (%s::%s)" % \
2549                         (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2550                 short_msg = "emerge: (%s of %s) %s Clean" % \
2551                         (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2552                 logger.log(msg, short_msg=short_msg)
2553
2554                 #buildsyspkg: Check if we need to _force_ binary package creation
2555                 self._issyspkg = "buildsyspkg" in features and \
2556                                 system_set.findAtomForPackage(pkg) and \
2557                                 not opts.buildpkg
2558
2559                 if opts.buildpkg or self._issyspkg:
2560
2561                         self._buildpkg = True
2562
2563                         msg = " === (%s of %s) Compiling/Packaging (%s::%s)" % \
2564                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2565                         short_msg = "emerge: (%s of %s) %s Compile" % \
2566                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2567                         logger.log(msg, short_msg=short_msg)
2568
2569                 else:
2570                         msg = " === (%s of %s) Compiling/Merging (%s::%s)" % \
2571                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2572                         short_msg = "emerge: (%s of %s) %s Compile" % \
2573                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2574                         logger.log(msg, short_msg=short_msg)
2575
2576                 build = EbuildExecuter(background=self.background, pkg=pkg,
2577                         scheduler=scheduler, settings=settings)
2578                 self._start_task(build, self._build_exit)
2579
2580         def _unlock_builddir(self):
2581                 portage.elog.elog_process(self.pkg.cpv, self.settings)
2582                 self._build_dir.unlock()
2583
2584         def _build_exit(self, build):
2585                 if self._default_exit(build) != os.EX_OK:
2586                         self._unlock_builddir()
2587                         self.wait()
2588                         return
2589
2590                 opts = self.opts
2591                 buildpkg = self._buildpkg
2592
2593                 if not buildpkg:
2594                         self._final_exit(build)
2595                         self.wait()
2596                         return
2597
2598                 if self._issyspkg:
2599                         msg = ">>> This is a system package, " + \
2600                                 "let's pack a rescue tarball.\n"
2601
2602                         log_path = self.settings.get("PORTAGE_LOG_FILE")
2603                         if log_path is not None:
2604                                 log_file = open(log_path, 'a')
2605                                 try:
2606                                         log_file.write(msg)
2607                                 finally:
2608                                         log_file.close()
2609
2610                         if not self.background:
2611                                 portage.writemsg_stdout(msg, noiselevel=-1)
2612
2613                 packager = EbuildBinpkg(background=self.background, pkg=self.pkg,
2614                         scheduler=self.scheduler, settings=self.settings)
2615
2616                 self._start_task(packager, self._buildpkg_exit)
2617
2618         def _buildpkg_exit(self, packager):
2619                 """
2620                 Released build dir lock when there is a failure or
2621                 when in buildpkgonly mode. Otherwise, the lock will
2622                 be released when merge() is called.
2623                 """
2624
2625                 if self._default_exit(packager) == os.EX_OK and \
2626                         self.opts.buildpkgonly:
2627                         # Need to call "clean" phase for buildpkgonly mode
2628                         portage.elog.elog_process(self.pkg.cpv, self.settings)
2629                         phase = "clean"
2630                         clean_phase = EbuildPhase(background=self.background,
2631                                 pkg=self.pkg, phase=phase,
2632                                 scheduler=self.scheduler, settings=self.settings,
2633                                 tree=self._tree)
2634                         self._start_task(clean_phase, self._clean_exit)
2635                         return
2636
2637                 if self._final_exit(packager) != os.EX_OK or \
2638                         self.opts.buildpkgonly:
2639                         self._unlock_builddir()
2640                 self.wait()
2641
2642         def _clean_exit(self, clean_phase):
2643                 if self._final_exit(clean_phase) != os.EX_OK or \
2644                         self.opts.buildpkgonly:
2645                         self._unlock_builddir()
2646                 self.wait()
2647
2648         def install(self):
2649                 """
2650                 Install the package and then clean up and release locks.
2651                 Only call this after the build has completed successfully
2652                 and neither fetchonly nor buildpkgonly mode are enabled.
2653                 """
2654
2655                 find_blockers = self.find_blockers
2656                 ldpath_mtimes = self.ldpath_mtimes
2657                 logger = self.logger
2658                 pkg = self.pkg
2659                 pkg_count = self.pkg_count
2660                 settings = self.settings
2661                 world_atom = self.world_atom
2662                 ebuild_path = self._ebuild_path
2663                 tree = self._tree
2664
2665                 merge = EbuildMerge(find_blockers=self.find_blockers,
2666                         ldpath_mtimes=ldpath_mtimes, logger=logger, pkg=pkg,
2667                         pkg_count=pkg_count, pkg_path=ebuild_path,
2668                         scheduler=self.scheduler,
2669                         settings=settings, tree=tree, world_atom=world_atom)
2670
2671                 msg = " === (%s of %s) Merging (%s::%s)" % \
2672                         (pkg_count.curval, pkg_count.maxval,
2673                         pkg.cpv, ebuild_path)
2674                 short_msg = "emerge: (%s of %s) %s Merge" % \
2675                         (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2676                 logger.log(msg, short_msg=short_msg)
2677
2678                 try:
2679                         rval = merge.execute()
2680                 finally:
2681                         self._unlock_builddir()
2682
2683                 return rval
2684
2685 class EbuildExecuter(CompositeTask):
2686
2687         __slots__ = ("pkg", "scheduler", "settings") + ("_tree",)
2688
2689         _phases = ("prepare", "configure", "compile", "test", "install")
2690
2691         _live_eclasses = frozenset([
2692                 "bzr",
2693                 "cvs",
2694                 "darcs",
2695                 "git",
2696                 "mercurial",
2697                 "subversion"
2698         ])
2699
2700         def _start(self):
2701                 self._tree = "porttree"
2702                 pkg = self.pkg
2703                 phase = "clean"
2704                 clean_phase = EbuildPhase(background=self.background, pkg=pkg, phase=phase,
2705                         scheduler=self.scheduler, settings=self.settings, tree=self._tree)
2706                 self._start_task(clean_phase, self._clean_phase_exit)
2707
2708         def _clean_phase_exit(self, clean_phase):
2709
2710                 if self._default_exit(clean_phase) != os.EX_OK:
2711                         self.wait()
2712                         return
2713
2714                 pkg = self.pkg
2715                 scheduler = self.scheduler
2716                 settings = self.settings
2717                 cleanup = 1
2718
2719                 # This initializes PORTAGE_LOG_FILE.
2720                 portage.prepare_build_dirs(pkg.root, settings, cleanup)
2721
2722                 setup_phase = EbuildPhase(background=self.background,
2723                         pkg=pkg, phase="setup", scheduler=scheduler,
2724                         settings=settings, tree=self._tree)
2725
2726                 setup_phase.addExitListener(self._setup_exit)
2727                 self._current_task = setup_phase
2728                 self.scheduler.scheduleSetup(setup_phase)
2729
2730         def _setup_exit(self, setup_phase):
2731
2732                 if self._default_exit(setup_phase) != os.EX_OK:
2733                         self.wait()
2734                         return
2735
2736                 unpack_phase = EbuildPhase(background=self.background,
2737                         pkg=self.pkg, phase="unpack", scheduler=self.scheduler,
2738                         settings=self.settings, tree=self._tree)
2739
2740                 if self._live_eclasses.intersection(self.pkg.inherited):
2741                         # Serialize $DISTDIR access for live ebuilds since
2742                         # otherwise they can interfere with eachother.
2743
2744                         unpack_phase.addExitListener(self._unpack_exit)
2745                         self._current_task = unpack_phase
2746                         self.scheduler.scheduleUnpack(unpack_phase)
2747
2748                 else:
2749                         self._start_task(unpack_phase, self._unpack_exit)
2750
2751         def _unpack_exit(self, unpack_phase):
2752
2753                 if self._default_exit(unpack_phase) != os.EX_OK:
2754                         self.wait()
2755                         return
2756
2757                 ebuild_phases = TaskSequence(scheduler=self.scheduler)
2758
2759                 pkg = self.pkg
2760                 phases = self._phases
2761                 eapi = pkg.metadata["EAPI"]
2762                 if eapi in ("0", "1", "2_pre1"):
2763                         # skip src_prepare and src_configure
2764                         phases = phases[2:]
2765                 elif eapi in ("2_pre2",):
2766                         # skip src_prepare
2767                         phases = phases[1:]
2768
2769                 for phase in phases:
2770                         ebuild_phases.add(EbuildPhase(background=self.background,
2771                                 pkg=self.pkg, phase=phase, scheduler=self.scheduler,
2772                                 settings=self.settings, tree=self._tree))
2773
2774                 self._start_task(ebuild_phases, self._default_final_exit)
2775
2776 class EbuildMetadataPhase(SubProcess):
2777
2778         """
2779         Asynchronous interface for the ebuild "depend" phase which is
2780         used to extract metadata from the ebuild.
2781         """
2782
2783         __slots__ = ("cpv", "ebuild_path", "fd_pipes", "metadata_callback",
2784                 "ebuild_mtime", "portdb", "repo_path", "settings") + \
2785                 ("_raw_metadata",)
2786
2787         _file_names = ("ebuild",)
2788         _files_dict = slot_dict_class(_file_names, prefix="")
2789         _bufsize = SpawnProcess._bufsize
2790         _metadata_fd = 9
2791
2792         def _start(self):
2793                 settings = self.settings
2794                 settings.reset()
2795                 ebuild_path = self.ebuild_path
2796                 debug = settings.get("PORTAGE_DEBUG") == "1"
2797                 master_fd = None
2798                 slave_fd = None
2799                 fd_pipes = None
2800                 if self.fd_pipes is not None:
2801                         fd_pipes = self.fd_pipes.copy()
2802                 else:
2803                         fd_pipes = {}
2804
2805                 fd_pipes.setdefault(0, sys.stdin.fileno())
2806                 fd_pipes.setdefault(1, sys.stdout.fileno())
2807                 fd_pipes.setdefault(2, sys.stderr.fileno())
2808
2809                 # flush any pending output
2810                 for fd in fd_pipes.itervalues():
2811                         if fd == sys.stdout.fileno():
2812                                 sys.stdout.flush()
2813                         if fd == sys.stderr.fileno():
2814                                 sys.stderr.flush()
2815
2816                 fd_pipes_orig = fd_pipes.copy()
2817                 self._files = self._files_dict()
2818                 files = self._files
2819
2820                 master_fd, slave_fd = os.pipe()
2821                 fcntl.fcntl(master_fd, fcntl.F_SETFL,
2822                         fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
2823
2824                 fd_pipes[self._metadata_fd] = slave_fd
2825
2826                 retval = portage.doebuild(ebuild_path, "depend",
2827                         settings["ROOT"], settings, debug,
2828                         mydbapi=self.portdb, tree="porttree",
2829                         fd_pipes=fd_pipes, returnpid=True)
2830
2831                 os.close(slave_fd)
2832
2833                 if isinstance(retval, int):
2834                         # doebuild failed before spawning
2835                         os.close(master_fd)
2836                         self.returncode = retval
2837                         self.wait()
2838                         return
2839
2840                 self.pid = retval[0]
2841                 portage.process.spawned_pids.remove(self.pid)
2842
2843                 self._raw_metadata = []
2844                 files.ebuild = os.fdopen(master_fd, 'r')
2845                 self._reg_id = self.scheduler.register(files.ebuild.fileno(),
2846                         PollConstants.POLLIN, self._output_handler)
2847                 self._registered = True
2848
2849         def _output_handler(self, fd, event):
2850                 files = self._files
2851                 self._raw_metadata.append(files.ebuild.read())
2852                 if not self._raw_metadata[-1]:
2853                         self._unregister()
2854                         self.wait()
2855
2856                         if self.returncode == os.EX_OK:
2857                                 metadata = izip(portage.auxdbkeys,
2858                                         "".join(self._raw_metadata).splitlines())
2859                                 self.metadata_callback(self.cpv, self.ebuild_path,
2860                                         self.repo_path, metadata, self.ebuild_mtime)
2861
2862                 return self._registered
2863
2864 class EbuildProcess(SpawnProcess):
2865
2866         __slots__ = ("phase", "pkg", "settings", "tree")
2867
2868         def _start(self):
2869                 # Don't open the log file during the clean phase since the
2870                 # open file can result in an nfs lock on $T/build.log which
2871                 # prevents the clean phase from removing $T.
2872                 if self.phase not in ("clean", "cleanrm"):
2873                         self.logfile = self.settings.get("PORTAGE_LOG_FILE")
2874                 SpawnProcess._start(self)
2875
2876         def _pipe(self, fd_pipes):
2877                 stdout_pipe = fd_pipes.get(1)
2878                 got_pty, master_fd, slave_fd = \
2879                         portage._create_pty_or_pipe(copy_term_size=stdout_pipe)
2880                 return (master_fd, slave_fd)
2881
2882         def _spawn(self, args, **kwargs):
2883
2884                 root_config = self.pkg.root_config
2885                 tree = self.tree
2886                 mydbapi = root_config.trees[tree].dbapi
2887                 settings = self.settings
2888                 ebuild_path = settings["EBUILD"]
2889                 debug = settings.get("PORTAGE_DEBUG") == "1"
2890
2891                 rval = portage.doebuild(ebuild_path, self.phase,
2892                         root_config.root, settings, debug,
2893                         mydbapi=mydbapi, tree=tree, **kwargs)
2894
2895                 return rval
2896
2897         def _set_returncode(self, wait_retval):
2898                 SpawnProcess._set_returncode(self, wait_retval)
2899
2900                 if self.phase not in ("clean", "cleanrm"):
2901                         self.returncode = portage._doebuild_exit_status_check_and_log(
2902                                 self.settings, self.phase, self.returncode)
2903
2904                 portage._post_phase_userpriv_perms(self.settings)
2905
2906 class EbuildPhase(CompositeTask):
2907
2908         __slots__ = ("background", "pkg", "phase",
2909                 "scheduler", "settings", "tree")
2910
2911         _post_phase_cmds = portage._post_phase_cmds
2912
2913         def _start(self):
2914
2915                 ebuild_process = EbuildProcess(background=self.background,
2916                         pkg=self.pkg, phase=self.phase, scheduler=self.scheduler,
2917                         settings=self.settings, tree=self.tree)
2918
2919                 self._start_task(ebuild_process, self._ebuild_exit)
2920
2921         def _ebuild_exit(self, ebuild_process):
2922
2923                 if self.phase == "install":
2924                         out = None
2925                         log_path = self.settings.get("PORTAGE_LOG_FILE")
2926                         log_file = None
2927                         if self.background and log_path is not None:
2928                                 log_file = open(log_path, 'a')
2929                                 out = log_file
2930                         try:
2931                                 portage._check_build_log(self.settings, out=out)
2932                         finally:
2933                                 if log_file is not None:
2934                                         log_file.close()
2935
2936                 if self._default_exit(ebuild_process) != os.EX_OK:
2937                         self.wait()
2938                         return
2939
2940                 settings = self.settings
2941
2942                 if self.phase == "install":
2943                         portage._post_src_install_uid_fix(settings)
2944
2945                 post_phase_cmds = self._post_phase_cmds.get(self.phase)
2946                 if post_phase_cmds is not None:
2947                         post_phase = MiscFunctionsProcess(background=self.background,
2948                                 commands=post_phase_cmds, phase=self.phase, pkg=self.pkg,
2949                                 scheduler=self.scheduler, settings=settings)
2950                         self._start_task(post_phase, self._post_phase_exit)
2951                         return
2952
2953                 self.returncode = ebuild_process.returncode
2954                 self._current_task = None
2955                 self.wait()
2956
2957         def _post_phase_exit(self, post_phase):
2958                 if self._final_exit(post_phase) != os.EX_OK:
2959                         writemsg("!!! post %s failed; exiting.\n" % self.phase,
2960                                 noiselevel=-1)
2961                 self._current_task = None
2962                 self.wait()
2963                 return
2964
2965 class EbuildBinpkg(EbuildProcess):
2966         """
2967         This assumes that src_install() has successfully completed.
2968         """
2969         __slots__ = ("_binpkg_tmpfile",)
2970
2971         def _start(self):
2972                 self.phase = "package"
2973                 self.tree = "porttree"
2974                 pkg = self.pkg
2975                 root_config = pkg.root_config
2976                 portdb = root_config.trees["porttree"].dbapi
2977                 bintree = root_config.trees["bintree"]
2978                 ebuild_path = portdb.findname(self.pkg.cpv)
2979                 settings = self.settings
2980                 debug = settings.get("PORTAGE_DEBUG") == "1"
2981
2982                 bintree.prevent_collision(pkg.cpv)
2983                 binpkg_tmpfile = os.path.join(bintree.pkgdir,
2984                         pkg.cpv + ".tbz2." + str(os.getpid()))
2985                 self._binpkg_tmpfile = binpkg_tmpfile
2986                 settings["PORTAGE_BINPKG_TMPFILE"] = binpkg_tmpfile
2987                 settings.backup_changes("PORTAGE_BINPKG_TMPFILE")
2988
2989                 try:
2990                         EbuildProcess._start(self)
2991                 finally:
2992                         settings.pop("PORTAGE_BINPKG_TMPFILE", None)
2993
2994         def _set_returncode(self, wait_retval):
2995                 EbuildProcess._set_returncode(self, wait_retval)
2996
2997                 pkg = self.pkg
2998                 bintree = pkg.root_config.trees["bintree"]
2999                 binpkg_tmpfile = self._binpkg_tmpfile
3000                 if self.returncode == os.EX_OK:
3001                         bintree.inject(pkg.cpv, filename=binpkg_tmpfile)
3002
3003 class EbuildMerge(SlotObject):
3004
3005         __slots__ = ("find_blockers", "logger", "ldpath_mtimes",
3006                 "pkg", "pkg_count", "pkg_path", "pretend",
3007                 "scheduler", "settings", "tree", "world_atom")
3008
3009         def execute(self):
3010                 root_config = self.pkg.root_config
3011                 settings = self.settings
3012                 retval = portage.merge(settings["CATEGORY"],
3013                         settings["PF"], settings["D"],
3014                         os.path.join(settings["PORTAGE_BUILDDIR"],
3015                         "build-info"), root_config.root, settings,
3016                         myebuild=settings["EBUILD"],
3017                         mytree=self.tree, mydbapi=root_config.trees[self.tree].dbapi,
3018                         vartree=root_config.trees["vartree"],
3019                         prev_mtimes=self.ldpath_mtimes,
3020                         scheduler=self.scheduler,
3021                         blockers=self.find_blockers)
3022
3023                 if retval == os.EX_OK:
3024                         self.world_atom(self.pkg)
3025                         self._log_success()
3026
3027                 return retval
3028
3029         def _log_success(self):
3030                 pkg = self.pkg
3031                 pkg_count = self.pkg_count
3032                 pkg_path = self.pkg_path
3033                 logger = self.logger
3034                 if "noclean" not in self.settings.features:
3035                         short_msg = "emerge: (%s of %s) %s Clean Post" % \
3036                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3037                         logger.log((" === (%s of %s) " + \
3038                                 "Post-Build Cleaning (%s::%s)") % \
3039                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path),
3040                                 short_msg=short_msg)
3041                 logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
3042                         (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
3043
3044 class PackageUninstall(AsynchronousTask):
3045
3046         __slots__ = ("ldpath_mtimes", "opts", "pkg", "scheduler", "settings")
3047
3048         def _start(self):
3049                 try:
3050                         unmerge(self.pkg.root_config, self.opts, "unmerge",
3051                                 [self.pkg.cpv], self.ldpath_mtimes, clean_world=0,
3052                                 clean_delay=0, raise_on_error=1, scheduler=self.scheduler,
3053                                 writemsg_level=self._writemsg_level)
3054                 except UninstallFailure, e:
3055                         self.returncode = e.status
3056                 else:
3057                         self.returncode = os.EX_OK
3058                 self.wait()
3059
3060         def _writemsg_level(self, msg, level=0, noiselevel=0):
3061
3062                 log_path = self.settings.get("PORTAGE_LOG_FILE")
3063                 background = self.background
3064
3065                 if log_path is None:
3066                         if not (background and level < logging.WARNING):
3067                                 portage.util.writemsg_level(msg,
3068                                         level=level, noiselevel=noiselevel)
3069                 else:
3070                         if not background:
3071                                 portage.util.writemsg_level(msg,
3072                                         level=level, noiselevel=noiselevel)
3073
3074                         f = open(log_path, 'a')
3075                         try:
3076                                 f.write(msg)
3077                         finally:
3078                                 f.close()
3079
3080 class Binpkg(CompositeTask):
3081
3082         __slots__ = ("find_blockers",
3083                 "ldpath_mtimes", "logger", "opts",
3084                 "pkg", "pkg_count", "prefetcher", "settings", "world_atom") + \
3085                 ("_bintree", "_build_dir", "_ebuild_path", "_fetched_pkg",
3086                 "_image_dir", "_infloc", "_pkg_path", "_tree", "_verify")
3087
3088         def _writemsg_level(self, msg, level=0, noiselevel=0):
3089
3090                 if not self.background:
3091                         portage.util.writemsg_level(msg,
3092                                 level=level, noiselevel=noiselevel)
3093
3094                 log_path = self.settings.get("PORTAGE_LOG_FILE")
3095                 if  log_path is not None:
3096                         f = open(log_path, 'a')
3097                         try:
3098                                 f.write(msg)
3099                         finally:
3100                                 f.close()
3101
3102         def _start(self):
3103
3104                 pkg = self.pkg
3105                 settings = self.settings
3106                 settings.setcpv(pkg)
3107                 self._tree = "bintree"
3108                 self._bintree = self.pkg.root_config.trees[self._tree]
3109                 self._verify = "strict" in self.settings.features and \
3110                         not self.opts.pretend
3111
3112                 dir_path = os.path.join(settings["PORTAGE_TMPDIR"],
3113                         "portage", pkg.category, pkg.pf)
3114                 self._build_dir = EbuildBuildDir(dir_path=dir_path,
3115                         pkg=pkg, settings=settings)
3116                 self._image_dir = os.path.join(dir_path, "image")
3117                 self._infloc = os.path.join(dir_path, "build-info")
3118                 self._ebuild_path = os.path.join(self._infloc, pkg.pf + ".ebuild")
3119
3120                 # The prefetcher has already completed or it
3121                 # could be running now. If it's running now,
3122                 # wait for it to complete since it holds
3123                 # a lock on the file being fetched. The
3124                 # portage.locks functions are only designed
3125                 # to work between separate processes. Since
3126                 # the lock is held by the current process,
3127                 # use the scheduler and fetcher methods to
3128                 # synchronize with the fetcher.
3129                 prefetcher = self.prefetcher
3130                 if prefetcher is None:
3131                         pass
3132                 elif not prefetcher.isAlive():
3133                         prefetcher.cancel()
3134                 elif prefetcher.poll() is None:
3135
3136                         waiting_msg = ("Fetching '%s' " + \
3137                                 "in the background. " + \
3138                                 "To view fetch progress, run `tail -f " + \
3139                                 "/var/log/emerge-fetch.log` in another " + \
3140                                 "terminal.") % prefetcher.pkg_path
3141                         msg_prefix = colorize("GOOD", " * ")
3142                         from textwrap import wrap
3143                         waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
3144                                 for line in wrap(waiting_msg, 65))
3145                         if not self.background:
3146                                 writemsg(waiting_msg, noiselevel=-1)
3147
3148                         self._current_task = prefetcher
3149                         prefetcher.addExitListener(self._prefetch_exit)
3150                         return
3151
3152                 self._prefetch_exit(prefetcher)
3153
3154         def _prefetch_exit(self, prefetcher):
3155
3156                 pkg = self.pkg
3157                 pkg_count = self.pkg_count
3158                 fetcher = BinpkgFetcher(background=self.background,
3159                         logfile=self.scheduler.fetch.log_file, pkg=self.pkg,
3160                         scheduler=self.scheduler)
3161                 pkg_path = fetcher.pkg_path
3162                 self._pkg_path = pkg_path
3163
3164                 if self.opts.getbinpkg and self._bintree.isremote(pkg.cpv):
3165
3166                         msg = " --- (%s of %s) Fetching Binary (%s::%s)" %\
3167                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
3168                         short_msg = "emerge: (%s of %s) %s Fetch" % \
3169                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3170                         self.logger.log(msg, short_msg=short_msg)
3171
3172                         if self.background:
3173                                 fetcher.addExitListener(self._fetcher_exit)
3174                                 self._current_task = fetcher
3175                                 self.scheduler.fetch.schedule(fetcher)
3176                         else:
3177                                 self._start_task(fetcher, self._fetcher_exit)
3178                         return
3179
3180                 self._fetcher_exit(fetcher)
3181
3182         def _fetcher_exit(self, fetcher):
3183
3184                 # The fetcher only has a returncode when
3185                 # --getbinpkg is enabled.
3186                 if fetcher.returncode is not None:
3187                         self._fetched_pkg = True
3188                         if self.opts.fetchonly:
3189                                 self._final_exit(fetcher)
3190                                 self.wait()
3191                                 return
3192                         elif self._default_exit(fetcher) != os.EX_OK:
3193                                 self.wait()
3194                                 return
3195
3196                 verifier = None
3197                 if self._verify:
3198                         verifier = BinpkgVerifier(background=self.background,
3199                                 logfile=self.scheduler.fetch.log_file, pkg=self.pkg)
3200
3201                         if self.background:
3202                                 verifier.addExitListener(self._verifier_exit)
3203                                 self._current_task = verifier
3204                                 self.scheduler.fetch.schedule(verifier)
3205                         else:
3206                                 self._start_task(verifier, self._verifier_exit)
3207                         return
3208
3209                 self._verifier_exit(verifier)
3210
3211         def _verifier_exit(self, verifier):
3212                 if verifier is not None and \
3213                         self._default_exit(verifier) != os.EX_OK:
3214                         self.wait()
3215                         return
3216
3217                 logger = self.logger
3218                 pkg = self.pkg
3219                 pkg_count = self.pkg_count
3220                 pkg_path = self._pkg_path
3221
3222                 if self._fetched_pkg:
3223                         self._bintree.inject(pkg.cpv, filename=pkg_path)
3224
3225                 msg = " === (%s of %s) Merging Binary (%s::%s)" % \
3226                         (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
3227                 short_msg = "emerge: (%s of %s) %s Merge Binary" % \
3228                         (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3229                 logger.log(msg, short_msg=short_msg)
3230
3231                 self._build_dir.lock()
3232
3233                 phase = "clean"
3234                 settings = self.settings
3235                 settings.setcpv(pkg)
3236                 settings["EBUILD"] = self._ebuild_path
3237                 ebuild_phase = EbuildPhase(background=self.background,
3238                         pkg=pkg, phase=phase, scheduler=self.scheduler,
3239                         settings=settings, tree=self._tree)
3240
3241                 self._start_task(ebuild_phase, self._clean_exit)
3242
3243         def _clean_exit(self, clean_phase):
3244                 if self._default_exit(clean_phase) != os.EX_OK:
3245                         self._unlock_builddir()
3246                         self.wait()
3247                         return
3248
3249                 dir_path = self._build_dir.dir_path
3250
3251                 try:
3252                         shutil.rmtree(dir_path)
3253                 except (IOError, OSError), e:
3254                         if e.errno != errno.ENOENT:
3255                                 raise
3256                         del e
3257
3258                 infloc = self._infloc
3259                 pkg = self.pkg
3260                 pkg_path = self._pkg_path
3261
3262                 dir_mode = 0755
3263                 for mydir in (dir_path, self._image_dir, infloc):
3264                         portage.util.ensure_dirs(mydir, uid=portage.data.portage_uid,
3265                                 gid=portage.data.portage_gid, mode=dir_mode)
3266
3267                 # This initializes PORTAGE_LOG_FILE.
3268                 portage.prepare_build_dirs(self.settings["ROOT"], self.settings, 1)
3269                 self._writemsg_level(">>> Extracting info\n")
3270
3271                 pkg_xpak = portage.xpak.tbz2(self._pkg_path)
3272                 check_missing_metadata = ("CATEGORY", "PF")
3273                 missing_metadata = set()
3274                 for k in check_missing_metadata:
3275                         v = pkg_xpak.getfile(k)
3276                         if not v:
3277                                 missing_metadata.add(k)
3278
3279                 pkg_xpak.unpackinfo(infloc)
3280                 for k in missing_metadata:
3281                         if k == "CATEGORY":
3282                                 v = pkg.category
3283                         elif k == "PF":
3284                                 v = pkg.pf
3285                         else:
3286                                 continue
3287
3288                         f = open(os.path.join(infloc, k), 'wb')
3289                         try:
3290                                 f.write(v + "\n")
3291                         finally:
3292                                 f.close()
3293
3294                 # Store the md5sum in the vdb.
3295                 f = open(os.path.join(infloc, "BINPKGMD5"), "w")
3296                 try:
3297                         f.write(str(portage.checksum.perform_md5(pkg_path)) + "\n")
3298                 finally:
3299                         f.close()
3300
3301                 # This gives bashrc users an opportunity to do various things
3302                 # such as remove binary packages after they're installed.
3303                 settings = self.settings
3304                 settings.setcpv(self.pkg)
3305                 settings["PORTAGE_BINPKG_FILE"] = pkg_path
3306                 settings.backup_changes("PORTAGE_BINPKG_FILE")
3307
3308                 phase = "setup"
3309                 setup_phase = EbuildPhase(background=self.background,
3310                         pkg=self.pkg, phase=phase, scheduler=self.scheduler,
3311                         settings=settings, tree=self._tree)
3312
3313                 setup_phase.addExitListener(self._setup_exit)
3314                 self._current_task = setup_phase
3315                 self.scheduler.scheduleSetup(setup_phase)
3316
3317         def _setup_exit(self, setup_phase):
3318                 if self._default_exit(setup_phase) != os.EX_OK:
3319                         self._unlock_builddir()
3320                         self.wait()
3321                         return
3322
3323                 extractor = BinpkgExtractorAsync(background=self.background,
3324                         image_dir=self._image_dir,
3325                         pkg=self.pkg, pkg_path=self._pkg_path, scheduler=self.scheduler)
3326                 self._writemsg_level(">>> Extracting %s\n" % self.pkg.cpv)
3327                 self._start_task(extractor, self._extractor_exit)
3328
3329         def _extractor_exit(self, extractor):
3330                 if self._final_exit(extractor) != os.EX_OK:
3331                         self._unlock_builddir()
3332                         writemsg("!!! Error Extracting '%s'\n" % self._pkg_path,
3333                                 noiselevel=-1)
3334                 self.wait()
3335
3336         def _unlock_builddir(self):
3337                 portage.elog.elog_process(self.pkg.cpv, self.settings)
3338                 self._build_dir.unlock()
3339
3340         def install(self):
3341
3342                 # This gives bashrc users an opportunity to do various things
3343                 # such as remove binary packages after they're installed.
3344                 settings = self.settings
3345                 settings["PORTAGE_BINPKG_FILE"] = self._pkg_path
3346                 settings.backup_changes("PORTAGE_BINPKG_FILE")
3347
3348                 merge = EbuildMerge(find_blockers=self.find_blockers,
3349                         ldpath_mtimes=self.ldpath_mtimes, logger=self.logger,
3350                         pkg=self.pkg, pkg_count=self.pkg_count,
3351                         pkg_path=self._pkg_path, scheduler=self.scheduler,
3352                         settings=settings, tree=self._tree, world_atom=self.world_atom)
3353
3354                 try:
3355                         retval = merge.execute()
3356                 finally:
3357                         settings.pop("PORTAGE_BINPKG_FILE", None)
3358                         self._unlock_builddir()
3359                 return retval
3360
3361 class BinpkgFetcher(SpawnProcess):
3362
3363         __slots__ = ("pkg",
3364                 "locked", "pkg_path", "_lock_obj")
3365
3366         def __init__(self, **kwargs):
3367                 SpawnProcess.__init__(self, **kwargs)
3368                 pkg = self.pkg
3369                 self.pkg_path = pkg.root_config.trees["bintree"].getname(pkg.cpv)
3370
3371         def _start(self):
3372
3373                 if self.cancelled:
3374                         return
3375
3376                 pkg = self.pkg
3377                 bintree = pkg.root_config.trees["bintree"]
3378                 settings = bintree.settings
3379                 use_locks = "distlocks" in settings.features
3380                 pkg_path = self.pkg_path
3381                 resume = os.path.exists(pkg_path)
3382
3383                 # urljoin doesn't work correctly with
3384                 # unrecognized protocols like sftp
3385                 if bintree._remote_has_index:
3386                         rel_uri = bintree._remotepkgs[pkg.cpv].get("PATH")
3387                         if not rel_uri:
3388                                 rel_uri = pkg.cpv + ".tbz2"
3389                         uri = bintree._remote_base_uri.rstrip("/") + \
3390                                 "/" + rel_uri.lstrip("/")
3391                 else:
3392                         uri = settings["PORTAGE_BINHOST"].rstrip("/") + \
3393                                 "/" + pkg.pf + ".tbz2"
3394
3395                 protocol = urlparse.urlparse(uri)[0]
3396                 fcmd_prefix = "FETCHCOMMAND"
3397                 if resume:
3398                         fcmd_prefix = "RESUMECOMMAND"
3399                 fcmd = settings.get(fcmd_prefix + "_" + protocol.upper())
3400                 if not fcmd:
3401                         fcmd = settings.get(fcmd_prefix)
3402
3403                 fcmd_vars = {
3404                         "DISTDIR" : os.path.dirname(pkg_path),
3405                         "URI"     : uri,
3406                         "FILE"    : os.path.basename(pkg_path)
3407                 }
3408
3409                 fetch_env = dict(settings.iteritems())
3410                 fetch_args = [portage.util.varexpand(x, mydict=fcmd_vars) \
3411                         for x in shlex.split(fcmd)]
3412
3413                 portage.util.ensure_dirs(os.path.dirname(pkg_path))
3414                 if use_locks:
3415                         self.lock()
3416
3417                 if self.fd_pipes is None:
3418                         self.fd_pipes = {}
3419                 fd_pipes = self.fd_pipes
3420
3421                 # Redirect all output to stdout since some fetchers like
3422                 # wget pollute stderr (if portage detects a problem then it
3423                 # can send it's own message to stderr).
3424                 fd_pipes.setdefault(0, sys.stdin.fileno())
3425                 fd_pipes.setdefault(1, sys.stdout.fileno())
3426                 fd_pipes.setdefault(2, sys.stdout.fileno())
3427
3428                 self.args = fetch_args
3429                 self.env = fetch_env
3430                 SpawnProcess._start(self)
3431
3432         def _set_returncode(self, wait_retval):
3433                 SpawnProcess._set_returncode(self, wait_retval)
3434                 if self.locked:
3435                         self.unlock()
3436
3437         def lock(self):
3438                 """
3439                 This raises an AlreadyLocked exception if lock() is called
3440                 while a lock is already held. In order to avoid this, call
3441                 unlock() or check whether the "locked" attribute is True
3442                 or False before calling lock().
3443                 """
3444                 if self._lock_obj is not None:
3445                         raise self.AlreadyLocked((self._lock_obj,))
3446
3447                 self._lock_obj = portage.locks.lockfile(
3448                         self.pkg_path, wantnewlockfile=1)
3449                 self.locked = True
3450
3451         class AlreadyLocked(portage.exception.PortageException):
3452                 pass
3453
3454         def unlock(self):
3455                 if self._lock_obj is None:
3456                         return
3457                 portage.locks.unlockfile(self._lock_obj)
3458                 self._lock_obj = None
3459                 self.locked = False
3460
3461 class BinpkgVerifier(AsynchronousTask):
3462         __slots__ = ("logfile", "pkg",)
3463
3464         def _start(self):
3465                 """
3466                 Note: Unlike a normal AsynchronousTask.start() method,
3467                 this one does all work is synchronously. The returncode
3468                 attribute will be set before it returns.
3469                 """
3470
3471                 pkg = self.pkg
3472                 root_config = pkg.root_config
3473                 bintree = root_config.trees["bintree"]
3474                 rval = os.EX_OK
3475                 stdout_orig = sys.stdout
3476                 stderr_orig = sys.stderr
3477                 log_file = None
3478                 if self.background and self.logfile is not None:
3479                         log_file = open(self.logfile, 'a')
3480                 try:
3481                         if log_file is not None:
3482                                 sys.stdout = log_file
3483                                 sys.stderr = log_file
3484                         try:
3485                                 bintree.digestCheck(pkg)
3486                         except portage.exception.FileNotFound:
3487                                 writemsg("!!! Fetching Binary failed " + \
3488                                         "for '%s'\n" % pkg.cpv, noiselevel=-1)
3489                                 rval = 1
3490                         except portage.exception.DigestException, e:
3491                                 writemsg("\n!!! Digest verification failed:\n",
3492                                         noiselevel=-1)
3493                                 writemsg("!!! %s\n" % e.value[0],
3494                                         noiselevel=-1)
3495                                 writemsg("!!! Reason: %s\n" % e.value[1],
3496                                         noiselevel=-1)
3497                                 writemsg("!!! Got: %s\n" % e.value[2],
3498                                         noiselevel=-1)
3499                                 writemsg("!!! Expected: %s\n" % e.value[3],
3500                                         noiselevel=-1)
3501                                 rval = 1
3502                 finally:
3503                         sys.stdout = stdout_orig
3504                         sys.stderr = stderr_orig
3505                         if log_file is not None:
3506                                 log_file.close()
3507
3508                 self.returncode = rval
3509                 self.wait()
3510
3511 class BinpkgExtractorAsync(SpawnProcess):
3512
3513         __slots__ = ("image_dir", "pkg", "pkg_path")
3514
3515         _shell_binary = portage.const.BASH_BINARY
3516
3517         def _start(self):
3518                 self.args = [self._shell_binary, "-c",
3519                         "bzip2 -dqc -- %s | tar -xp -C %s -f -" % \
3520                         (portage._shell_quote(self.pkg_path),
3521                         portage._shell_quote(self.image_dir))]
3522
3523                 self.env = self.pkg.root_config.settings.environ()
3524                 SpawnProcess._start(self)
3525
3526 class MergeListItem(CompositeTask):
3527
3528         """
3529         TODO: For parallel scheduling, everything here needs asynchronous
3530         execution support (start, poll, and wait methods).
3531         """
3532
3533         __slots__ = ("args_set",
3534                 "binpkg_opts", "build_opts", "config_pool", "emerge_opts",
3535                 "find_blockers", "logger", "mtimedb", "pkg",
3536                 "pkg_count", "pkg_to_replace", "prefetcher",
3537                 "settings", "statusMessage", "world_atom") + \
3538                 ("_install_task",)
3539
3540         def _start(self):
3541
3542                 pkg = self.pkg
3543                 build_opts = self.build_opts
3544
3545                 if pkg.installed:
3546                         # uninstall,  executed by self.merge()
3547                         self.returncode = os.EX_OK
3548                         self.wait()
3549                         return
3550
3551                 args_set = self.args_set
3552                 find_blockers = self.find_blockers
3553                 logger = self.logger
3554                 mtimedb = self.mtimedb
3555                 pkg_count = self.pkg_count
3556                 scheduler = self.scheduler
3557                 settings = self.settings
3558                 world_atom = self.world_atom
3559                 ldpath_mtimes = mtimedb["ldpath"]
3560
3561                 action_desc = "Emerging"
3562                 preposition = "for"
3563                 if pkg.type_name == "binary":
3564                         action_desc += " binary"
3565
3566                 if build_opts.fetchonly:
3567                         action_desc = "Fetching"
3568
3569                 msg = "%s (%s of %s) %s" % \
3570                         (action_desc,
3571                         colorize("MERGE_LIST_PROGRESS", str(pkg_count.curval)),
3572                         colorize("MERGE_LIST_PROGRESS", str(pkg_count.maxval)),
3573                         colorize("GOOD", pkg.cpv))
3574
3575                 if pkg.root != "/":
3576                         msg += " %s %s" % (preposition, pkg.root)
3577
3578                 if not build_opts.pretend:
3579                         self.statusMessage(msg)
3580                         logger.log(" >>> emerge (%s of %s) %s to %s" % \
3581                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
3582
3583                 if pkg.type_name == "ebuild":
3584
3585                         build = EbuildBuild(args_set=args_set,
3586                                 background=self.background,
3587                                 config_pool=self.config_pool,
3588                                 find_blockers=find_blockers,
3589                                 ldpath_mtimes=ldpath_mtimes, logger=logger,
3590                                 opts=build_opts, pkg=pkg, pkg_count=pkg_count,
3591                                 prefetcher=self.prefetcher, scheduler=scheduler,
3592                                 settings=settings, world_atom=world_atom)
3593
3594                         self._install_task = build
3595                         self._start_task(build, self._default_final_exit)
3596                         return
3597
3598                 elif pkg.type_name == "binary":
3599
3600                         binpkg = Binpkg(background=self.background,
3601                                 find_blockers=find_blockers,
3602                                 ldpath_mtimes=ldpath_mtimes, logger=logger,
3603                                 opts=self.binpkg_opts, pkg=pkg, pkg_count=pkg_count,
3604                                 prefetcher=self.prefetcher, settings=settings,
3605                                 scheduler=scheduler, world_atom=world_atom)
3606
3607                         self._install_task = binpkg
3608                         self._start_task(binpkg, self._default_final_exit)
3609                         return
3610
3611         def _poll(self):
3612                 self._install_task.poll()
3613                 return self.returncode
3614
3615         def _wait(self):
3616                 self._install_task.wait()
3617                 return self.returncode
3618
3619         def merge(self):
3620
3621                 pkg = self.pkg
3622                 build_opts = self.build_opts
3623                 find_blockers = self.find_blockers
3624                 logger = self.logger
3625                 mtimedb = self.mtimedb
3626                 pkg_count = self.pkg_count
3627                 prefetcher = self.prefetcher
3628                 scheduler = self.scheduler
3629                 settings = self.settings
3630                 world_atom = self.world_atom
3631                 ldpath_mtimes = mtimedb["ldpath"]
3632
3633                 if pkg.installed:
3634                         if not (build_opts.buildpkgonly or \
3635                                 build_opts.fetchonly or build_opts.pretend):
3636
3637                                 uninstall = PackageUninstall(background=self.background,
3638                                         ldpath_mtimes=ldpath_mtimes, opts=self.emerge_opts,
3639                                         pkg=pkg, scheduler=scheduler, settings=settings)
3640
3641                                 uninstall.start()
3642                                 retval = uninstall.wait()
3643                                 if retval != os.EX_OK:
3644                                         return retval
3645                         return os.EX_OK
3646
3647                 if build_opts.fetchonly or \
3648                         build_opts.buildpkgonly:
3649                         return self.returncode
3650
3651                 retval = self._install_task.install()
3652                 return retval
3653
3654 class PackageMerge(AsynchronousTask):
3655         """
3656         TODO: Implement asynchronous merge so that the scheduler can
3657         run while a merge is executing.
3658         """
3659
3660         __slots__ = ("merge",)
3661
3662         def _start(self):
3663
3664                 pkg = self.merge.pkg
3665                 pkg_count = self.merge.pkg_count
3666
3667                 if pkg.installed:
3668                         action_desc = "Uninstalling"
3669                         preposition = "from"
3670                 else:
3671                         action_desc = "Installing"
3672                         preposition = "to"
3673
3674                 msg = "%s %s" % (action_desc, colorize("GOOD", pkg.cpv))
3675
3676                 if pkg.root != "/":
3677                         msg += " %s %s" % (preposition, pkg.root)
3678
3679                 if not self.merge.build_opts.fetchonly and \
3680                         not self.merge.build_opts.pretend and \
3681                         not self.merge.build_opts.buildpkgonly:
3682                         self.merge.statusMessage(msg)
3683
3684                 self.returncode = self.merge.merge()
3685                 self.wait()
3686
3687 class DependencyArg(object):
3688         def __init__(self, arg=None, root_config=None):
3689                 self.arg = arg
3690                 self.root_config = root_config
3691
3692         def __str__(self):
3693                 return self.arg
3694
3695 class AtomArg(DependencyArg):
3696         def __init__(self, atom=None, **kwargs):
3697                 DependencyArg.__init__(self, **kwargs)
3698                 self.atom = atom
3699                 if not isinstance(self.atom, portage.dep.Atom):
3700                         self.atom = portage.dep.Atom(self.atom)
3701                 self.set = (self.atom, )
3702
3703 class PackageArg(DependencyArg):
3704         def __init__(self, package=None, **kwargs):
3705                 DependencyArg.__init__(self, **kwargs)
3706                 self.package = package
3707                 self.atom = portage.dep.Atom("=" + package.cpv)
3708                 self.set = (self.atom, )
3709
3710 class SetArg(DependencyArg):
3711         def __init__(self, set=None, **kwargs):
3712                 DependencyArg.__init__(self, **kwargs)
3713                 self.set = set
3714                 self.name = self.arg[len(SETPREFIX):]
3715
3716         def __str__(self):
3717                 return self.name
3718
3719 class Dependency(SlotObject):
3720         __slots__ = ("atom", "blocker", "depth",
3721                 "parent", "onlydeps", "priority", "root")
3722         def __init__(self, **kwargs):
3723                 SlotObject.__init__(self, **kwargs)
3724                 if self.priority is None:
3725                         self.priority = DepPriority()
3726                 if self.depth is None:
3727                         self.depth = 0
3728
3729 class BlockerCache(DictMixin):
3730         """This caches blockers of installed packages so that dep_check does not
3731         have to be done for every single installed package on every invocation of
3732         emerge.  The cache is invalidated whenever it is detected that something
3733         has changed that might alter the results of dep_check() calls:
3734                 1) the set of installed packages (including COUNTER) has changed
3735                 2) the old-style virtuals have changed
3736         """
3737
3738         # Number of uncached packages to trigger cache update, since
3739         # it's wasteful to update it for every vdb change.
3740         _cache_threshold = 5
3741
3742         class BlockerData(object):
3743
3744                 __slots__ = ("__weakref__", "atoms", "counter")
3745
3746                 def __init__(self, counter, atoms):
3747                         self.counter = counter
3748                         self.atoms = atoms
3749
3750         def __init__(self, myroot, vardb):
3751                 self._vardb = vardb
3752                 self._virtuals = vardb.settings.getvirtuals()
3753                 self._cache_filename = os.path.join(myroot,
3754                         portage.CACHE_PATH.lstrip(os.path.sep), "vdb_blockers.pickle")
3755                 self._cache_version = "1"
3756                 self._cache_data = None
3757                 self._modified = set()
3758                 self._load()
3759
3760         def _load(self):
3761                 try:
3762                         f = open(self._cache_filename)
3763                         mypickle = pickle.Unpickler(f)
3764                         mypickle.find_global = None
3765                         self._cache_data = mypickle.load()
3766                         f.close()
3767                         del f
3768                 except (IOError, OSError, EOFError, pickle.UnpicklingError), e:
3769                         if isinstance(e, pickle.UnpicklingError):
3770                                 writemsg("!!! Error loading '%s': %s\n" % \
3771                                         (self._cache_filename, str(e)), noiselevel=-1)
3772                         del e
3773
3774                 cache_valid = self._cache_data and \
3775                         isinstance(self._cache_data, dict) and \
3776                         self._cache_data.get("version") == self._cache_version and \
3777                         isinstance(self._cache_data.get("blockers"), dict)
3778                 if cache_valid:
3779                         # Validate all the atoms and counters so that
3780                         # corruption is detected as soon as possible.
3781                         invalid_items = set()
3782                         for k, v in self._cache_data["blockers"].iteritems():
3783                                 if not isinstance(k, basestring):
3784                                         invalid_items.add(k)
3785                                         continue
3786                                 try:
3787                                         if portage.catpkgsplit(k) is None:
3788                                                 invalid_items.add(k)
3789                                                 continue
3790                                 except portage.exception.InvalidData:
3791                                         invalid_items.add(k)
3792                                         continue
3793                                 if not isinstance(v, tuple) or \
3794                                         len(v) != 2:
3795                                         invalid_items.add(k)
3796                                         continue
3797                                 counter, atoms = v
3798                                 if not isinstance(counter, (int, long)):
3799                                         invalid_items.add(k)
3800                                         continue
3801                                 if not isinstance(atoms, (list, tuple)):
3802                                         invalid_items.add(k)
3803                                         continue
3804                                 invalid_atom = False
3805                                 for atom in atoms:
3806                                         if not isinstance(atom, basestring):
3807                                                 invalid_atom = True
3808                                                 break
3809                                         if atom[:1] != "!" or \
3810                                                 not portage.isvalidatom(
3811                                                 atom, allow_blockers=True):
3812                                                 invalid_atom = True
3813                                                 break
3814                                 if invalid_atom:
3815                                         invalid_items.add(k)
3816                                         continue
3817
3818                         for k in invalid_items:
3819                                 del self._cache_data["blockers"][k]
3820                         if not self._cache_data["blockers"]:
3821                                 cache_valid = False
3822
3823                 if not cache_valid:
3824                         self._cache_data = {"version":self._cache_version}
3825                         self._cache_data["blockers"] = {}
3826                         self._cache_data["virtuals"] = self._virtuals
3827                 self._modified.clear()
3828
3829         def flush(self):
3830                 """If the current user has permission and the internal blocker cache
3831                 been updated, save it to disk and mark it unmodified.  This is called
3832                 by emerge after it has proccessed blockers for all installed packages.
3833                 Currently, the cache is only written if the user has superuser
3834                 privileges (since that's required to obtain a lock), but all users
3835                 have read access and benefit from faster blocker lookups (as long as
3836                 the entire cache is still valid).  The cache is stored as a pickled
3837                 dict object with the following format:
3838
3839                 {
3840                         version : "1",
3841                         "blockers" : {cpv1:(counter,(atom1, atom2...)), cpv2...},
3842                         "virtuals" : vardb.settings.getvirtuals()
3843                 }
3844                 """
3845                 if len(self._modified) >= self._cache_threshold and \
3846                         secpass >= 2:
3847                         try:
3848                                 f = portage.util.atomic_ofstream(self._cache_filename)
3849                                 pickle.dump(self._cache_data, f, -1)
3850                                 f.close()
3851                                 portage.util.apply_secpass_permissions(
3852                                         self._cache_filename, gid=portage.portage_gid, mode=0644)
3853                         except (IOError, OSError), e:
3854                                 pass
3855                         self._modified.clear()
3856
3857         def __setitem__(self, cpv, blocker_data):
3858                 """
3859                 Update the cache and mark it as modified for a future call to
3860                 self.flush().
3861
3862                 @param cpv: Package for which to cache blockers.
3863                 @type cpv: String
3864                 @param blocker_data: An object with counter and atoms attributes.
3865                 @type blocker_data: BlockerData
3866                 """
3867                 self._cache_data["blockers"][cpv] = \
3868                         (blocker_data.counter, tuple(str(x) for x in blocker_data.atoms))
3869                 self._modified.add(cpv)
3870
3871         def __iter__(self):
3872                 return iter(self._cache_data["blockers"])
3873
3874         def __delitem__(self, cpv):
3875                 del self._cache_data["blockers"][cpv]
3876
3877         def __getitem__(self, cpv):
3878                 """
3879                 @rtype: BlockerData
3880                 @returns: An object with counter and atoms attributes.
3881                 """
3882                 return self.BlockerData(*self._cache_data["blockers"][cpv])
3883
3884         def keys(self):
3885                 """This needs to be implemented so that self.__repr__() doesn't raise
3886                 an AttributeError."""
3887                 return list(self)
3888
3889 class BlockerDB(object):
3890
3891         def __init__(self, root_config):
3892                 self._root_config = root_config
3893                 self._vartree = root_config.trees["vartree"]
3894                 self._portdb = root_config.trees["porttree"].dbapi
3895
3896                 self._dep_check_trees = None
3897                 self._fake_vartree = None
3898
3899         def _get_fake_vartree(self, acquire_lock=0):
3900                 fake_vartree = self._fake_vartree
3901                 if fake_vartree is None:
3902                         fake_vartree = FakeVartree(self._root_config,
3903                                 acquire_lock=acquire_lock)
3904                         self._fake_vartree = fake_vartree
3905                         self._dep_check_trees = { self._vartree.root : {
3906                                 "porttree"    :  fake_vartree,
3907                                 "vartree"     :  fake_vartree,
3908                         }}
3909                 else:
3910                         fake_vartree.sync(acquire_lock=acquire_lock)
3911                 return fake_vartree
3912
3913         def findInstalledBlockers(self, new_pkg, acquire_lock=0):
3914                 blocker_cache = BlockerCache(self._vartree.root, self._vartree.dbapi)
3915                 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
3916                 settings = self._vartree.settings
3917                 stale_cache = set(blocker_cache)
3918                 fake_vartree = self._get_fake_vartree(acquire_lock=acquire_lock)
3919                 dep_check_trees = self._dep_check_trees
3920                 vardb = fake_vartree.dbapi
3921                 installed_pkgs = list(vardb)
3922
3923                 for inst_pkg in installed_pkgs:
3924                         stale_cache.discard(inst_pkg.cpv)
3925                         cached_blockers = blocker_cache.get(inst_pkg.cpv)
3926                         if cached_blockers is not None and \
3927                                 cached_blockers.counter != long(inst_pkg.metadata["COUNTER"]):
3928                                 cached_blockers = None
3929                         if cached_blockers is not None:
3930                                 blocker_atoms = cached_blockers.atoms
3931                         else:
3932                                 # Use aux_get() to trigger FakeVartree global
3933                                 # updates on *DEPEND when appropriate.
3934                                 depstr = " ".join(vardb.aux_get(inst_pkg.cpv, dep_keys))
3935                                 try:
3936                                         portage.dep._dep_check_strict = False
3937                                         success, atoms = portage.dep_check(depstr,
3938                                                 vardb, settings, myuse=inst_pkg.use.enabled,
3939                                                 trees=dep_check_trees, myroot=inst_pkg.root)
3940                                 finally:
3941                                         portage.dep._dep_check_strict = True
3942                                 if not success:
3943                                         pkg_location = os.path.join(inst_pkg.root,
3944                                                 portage.VDB_PATH, inst_pkg.category, inst_pkg.pf)
3945                                         portage.writemsg("!!! %s/*DEPEND: %s\n" % \
3946                                                 (pkg_location, atoms), noiselevel=-1)
3947                                         continue
3948
3949                                 blocker_atoms = [atom for atom in atoms \
3950                                         if atom.startswith("!")]
3951                                 blocker_atoms.sort()
3952                                 counter = long(inst_pkg.metadata["COUNTER"])
3953                                 blocker_cache[inst_pkg.cpv] = \
3954                                         blocker_cache.BlockerData(counter, blocker_atoms)
3955                 for cpv in stale_cache:
3956                         del blocker_cache[cpv]
3957                 blocker_cache.flush()
3958
3959                 blocker_parents = digraph()
3960                 blocker_atoms = []
3961                 for pkg in installed_pkgs:
3962                         for blocker_atom in blocker_cache[pkg.cpv].atoms:
3963                                 blocker_atom = blocker_atom.lstrip("!")
3964                                 blocker_atoms.append(blocker_atom)
3965                                 blocker_parents.add(blocker_atom, pkg)
3966
3967                 blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
3968                 blocking_pkgs = set()
3969                 for atom in blocker_atoms.iterAtomsForPackage(new_pkg):
3970                         blocking_pkgs.update(blocker_parents.parent_nodes(atom))
3971
3972                 # Check for blockers in the other direction.
3973                 depstr = " ".join(new_pkg.metadata[k] for k in dep_keys)
3974                 try:
3975                         portage.dep._dep_check_strict = False
3976                         success, atoms = portage.dep_check(depstr,
3977                                 vardb, settings, myuse=new_pkg.use.enabled,
3978                                 trees=dep_check_trees, myroot=new_pkg.root)
3979                 finally:
3980                         portage.dep._dep_check_strict = True
3981                 if not success:
3982                         # We should never get this far with invalid deps.
3983                         show_invalid_depstring_notice(new_pkg, depstr, atoms)
3984                         assert False
3985
3986                 blocker_atoms = [atom.lstrip("!") for atom in atoms \
3987                         if atom[:1] == "!"]
3988                 if blocker_atoms:
3989                         blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
3990                         for inst_pkg in installed_pkgs:
3991                                 try:
3992                                         blocker_atoms.iterAtomsForPackage(inst_pkg).next()
3993                                 except (portage.exception.InvalidDependString, StopIteration):
3994                                         continue
3995                                 blocking_pkgs.add(inst_pkg)
3996
3997                 return blocking_pkgs
3998
3999 def show_invalid_depstring_notice(parent_node, depstring, error_msg):
4000
4001         msg1 = "\n\n!!! Invalid or corrupt dependency specification: " + \
4002                 "\n\n%s\n\n%s\n\n%s\n\n" % (error_msg, parent_node, depstring)
4003         p_type, p_root, p_key, p_status = parent_node
4004         msg = []
4005         if p_status == "nomerge":
4006                 category, pf = portage.catsplit(p_key)
4007                 pkg_location = os.path.join(p_root, portage.VDB_PATH, category, pf)
4008                 msg.append("Portage is unable to process the dependencies of the ")
4009                 msg.append("'%s' package. " % p_key)
4010                 msg.append("In order to correct this problem, the package ")
4011                 msg.append("should be uninstalled, reinstalled, or upgraded. ")
4012                 msg.append("As a temporary workaround, the --nodeps option can ")
4013                 msg.append("be used to ignore all dependencies.  For reference, ")
4014                 msg.append("the problematic dependencies can be found in the ")
4015                 msg.append("*DEPEND files located in '%s/'." % pkg_location)
4016         else:
4017                 msg.append("This package can not be installed. ")
4018                 msg.append("Please notify the '%s' package maintainer " % p_key)
4019                 msg.append("about this problem.")
4020
4021         msg2 = "".join("%s\n" % line for line in textwrap.wrap("".join(msg), 72))
4022         writemsg_level(msg1 + msg2, level=logging.ERROR, noiselevel=-1)
4023
4024 class PackageVirtualDbapi(portage.dbapi):
4025         """
4026         A dbapi-like interface class that represents the state of the installed
4027         package database as new packages are installed, replacing any packages
4028         that previously existed in the same slot. The main difference between
4029         this class and fakedbapi is that this one uses Package instances
4030         internally (passed in via cpv_inject() and cpv_remove() calls).
4031         """
4032         def __init__(self, settings):
4033                 portage.dbapi.__init__(self)
4034                 self.settings = settings
4035                 self._match_cache = {}
4036                 self._cp_map = {}
4037                 self._cpv_map = {}
4038
4039         def clear(self):
4040                 """
4041                 Remove all packages.
4042                 """
4043                 if self._cpv_map:
4044                         self._clear_cache()
4045                         self._cp_map.clear()
4046                         self._cpv_map.clear()
4047
4048         def copy(self):
4049                 obj = PackageVirtualDbapi(self.settings)
4050                 obj._match_cache = self._match_cache.copy()
4051                 obj._cp_map = self._cp_map.copy()
4052                 for k, v in obj._cp_map.iteritems():
4053                         obj._cp_map[k] = v[:]
4054                 obj._cpv_map = self._cpv_map.copy()
4055                 return obj
4056
4057         def __iter__(self):
4058                 return self._cpv_map.itervalues()
4059
4060         def __contains__(self, item):
4061                 existing = self._cpv_map.get(item.cpv)
4062                 if existing is not None and \
4063                         existing == item:
4064                         return True
4065                 return False
4066
4067         def get(self, item, default=None):
4068                 cpv = getattr(item, "cpv", None)
4069                 if cpv is None:
4070                         if len(item) != 4:
4071                                 return default
4072                         type_name, root, cpv, operation = item
4073
4074                 existing = self._cpv_map.get(cpv)
4075                 if existing is not None and \
4076                         existing == item:
4077                         return existing
4078                 return default
4079
4080         def match_pkgs(self, atom):
4081                 return [self._cpv_map[cpv] for cpv in self.match(atom)]
4082
4083         def _clear_cache(self):
4084                 if self._categories is not None:
4085                         self._categories = None
4086                 if self._match_cache:
4087                         self._match_cache = {}
4088
4089         def match(self, origdep, use_cache=1):
4090                 result = self._match_cache.get(origdep)
4091                 if result is not None:
4092                         return result[:]
4093                 result = portage.dbapi.match(self, origdep, use_cache=use_cache)
4094                 self._match_cache[origdep] = result
4095                 return result[:]
4096
4097         def cpv_exists(self, cpv):
4098                 return cpv in self._cpv_map
4099
4100         def cp_list(self, mycp, use_cache=1):
4101                 cachelist = self._match_cache.get(mycp)
4102                 # cp_list() doesn't expand old-style virtuals
4103                 if cachelist and cachelist[0].startswith(mycp):
4104                         return cachelist[:]
4105                 cpv_list = self._cp_map.get(mycp)
4106                 if cpv_list is None:
4107                         cpv_list = []
4108                 else:
4109                         cpv_list = [pkg.cpv for pkg in cpv_list]
4110                 self._cpv_sort_ascending(cpv_list)
4111                 if not (not cpv_list and mycp.startswith("virtual/")):
4112                         self._match_cache[mycp] = cpv_list
4113                 return cpv_list[:]
4114
4115         def cp_all(self):
4116                 return list(self._cp_map)
4117
4118         def cpv_all(self):
4119                 return list(self._cpv_map)
4120
4121         def cpv_inject(self, pkg):
4122                 cp_list = self._cp_map.get(pkg.cp)
4123                 if cp_list is None:
4124                         cp_list = []
4125                         self._cp_map[pkg.cp] = cp_list
4126                 e_pkg = self._cpv_map.get(pkg.cpv)
4127                 if e_pkg is not None:
4128                         if e_pkg == pkg:
4129                                 return
4130                         self.cpv_remove(e_pkg)
4131                 for e_pkg in cp_list:
4132                         if e_pkg.slot_atom == pkg.slot_atom:
4133                                 if e_pkg == pkg:
4134                                         return
4135                                 self.cpv_remove(e_pkg)
4136                                 break
4137                 cp_list.append(pkg)
4138                 self._cpv_map[pkg.cpv] = pkg
4139                 self._clear_cache()
4140
4141         def cpv_remove(self, pkg):
4142                 old_pkg = self._cpv_map.get(pkg.cpv)
4143                 if old_pkg != pkg:
4144                         raise KeyError(pkg)
4145                 self._cp_map[pkg.cp].remove(pkg)
4146                 del self._cpv_map[pkg.cpv]
4147                 self._clear_cache()
4148
4149         def aux_get(self, cpv, wants):
4150                 metadata = self._cpv_map[cpv].metadata
4151                 return [metadata.get(x, "") for x in wants]
4152
4153         def aux_update(self, cpv, values):
4154                 self._cpv_map[cpv].metadata.update(values)
4155                 self._clear_cache()
4156
4157 class depgraph(object):
4158
4159         pkg_tree_map = RootConfig.pkg_tree_map
4160
4161         _dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
4162
4163         def __init__(self, settings, trees, myopts, myparams, spinner):
4164                 self.settings = settings
4165                 self.target_root = settings["ROOT"]
4166                 self.myopts = myopts
4167                 self.myparams = myparams
4168                 self.edebug = 0
4169                 if settings.get("PORTAGE_DEBUG", "") == "1":
4170                         self.edebug = 1
4171                 self.spinner = spinner
4172                 self._running_root = trees["/"]["root_config"]
4173                 self._opts_no_restart = Scheduler._opts_no_restart
4174                 self.pkgsettings = {}
4175                 # Maps slot atom to package for each Package added to the graph.
4176                 self._slot_pkg_map = {}
4177                 # Maps nodes to the reasons they were selected for reinstallation.
4178                 self._reinstall_nodes = {}
4179                 self.mydbapi = {}
4180                 self.trees = {}
4181                 self._trees_orig = trees
4182                 self.roots = {}
4183                 # Contains a filtered view of preferred packages that are selected
4184                 # from available repositories.
4185                 self._filtered_trees = {}
4186                 # Contains installed packages and new packages that have been added
4187                 # to the graph.
4188                 self._graph_trees = {}
4189                 # All Package instances
4190                 self._pkg_cache = self._package_cache(self)
4191                 for myroot in trees:
4192                         self.trees[myroot] = {}
4193                         # Create a RootConfig instance that references
4194                         # the FakeVartree instead of the real one.
4195                         self.roots[myroot] = RootConfig(
4196                                 trees[myroot]["vartree"].settings,
4197                                 self.trees[myroot],
4198                                 trees[myroot]["root_config"].setconfig)
4199                         for tree in ("porttree", "bintree"):
4200                                 self.trees[myroot][tree] = trees[myroot][tree]
4201                         self.trees[myroot]["vartree"] = \
4202                                 FakeVartree(trees[myroot]["root_config"],
4203                                         pkg_cache=self._pkg_cache)
4204                         self.pkgsettings[myroot] = portage.config(
4205                                 clone=self.trees[myroot]["vartree"].settings)
4206                         self._slot_pkg_map[myroot] = {}
4207                         vardb = self.trees[myroot]["vartree"].dbapi
4208                         preload_installed_pkgs = "--nodeps" not in self.myopts and \
4209                                 "--buildpkgonly" not in self.myopts
4210                         # This fakedbapi instance will model the state that the vdb will
4211                         # have after new packages have been installed.
4212                         fakedb = PackageVirtualDbapi(vardb.settings)
4213                         if preload_installed_pkgs:
4214                                 for pkg in vardb:
4215                                         self.spinner.update()
4216                                         # This triggers metadata updates via FakeVartree.
4217                                         vardb.aux_get(pkg.cpv, [])
4218                                         fakedb.cpv_inject(pkg)
4219
4220                         # Now that the vardb state is cached in our FakeVartree,
4221                         # we won't be needing the real vartree cache for awhile.
4222                         # To make some room on the heap, clear the vardbapi
4223                         # caches.
4224                         trees[myroot]["vartree"].dbapi._clear_cache()
4225                         gc.collect()
4226
4227                         self.mydbapi[myroot] = fakedb
4228                         def graph_tree():
4229                                 pass
4230                         graph_tree.dbapi = fakedb
4231                         self._graph_trees[myroot] = {}
4232                         self._filtered_trees[myroot] = {}
4233                         # Substitute the graph tree for the vartree in dep_check() since we
4234                         # want atom selections to be consistent with package selections
4235                         # have already been made.
4236                         self._graph_trees[myroot]["porttree"]   = graph_tree
4237                         self._graph_trees[myroot]["vartree"]    = graph_tree
4238                         def filtered_tree():
4239                                 pass
4240                         filtered_tree.dbapi = self._dep_check_composite_db(self, myroot)
4241                         self._filtered_trees[myroot]["porttree"] = filtered_tree
4242
4243                         # Passing in graph_tree as the vartree here could lead to better
4244                         # atom selections in some cases by causing atoms for packages that
4245                         # have been added to the graph to be preferred over other choices.
4246                         # However, it can trigger atom selections that result in
4247                         # unresolvable direct circular dependencies. For example, this
4248                         # happens with gwydion-dylan which depends on either itself or
4249                         # gwydion-dylan-bin. In case gwydion-dylan is not yet installed,
4250                         # gwydion-dylan-bin needs to be selected in order to avoid a
4251                         # an unresolvable direct circular dependency.
4252                         #
4253                         # To solve the problem described above, pass in "graph_db" so that
4254                         # packages that have been added to the graph are distinguishable
4255                         # from other available packages and installed packages. Also, pass
4256                         # the parent package into self._select_atoms() calls so that
4257                         # unresolvable direct circular dependencies can be detected and
4258                         # avoided when possible.
4259                         self._filtered_trees[myroot]["graph_db"] = graph_tree.dbapi
4260                         self._filtered_trees[myroot]["vartree"] = self.trees[myroot]["vartree"]
4261
4262                         dbs = []
4263                         portdb = self.trees[myroot]["porttree"].dbapi
4264                         bindb  = self.trees[myroot]["bintree"].dbapi
4265                         vardb  = self.trees[myroot]["vartree"].dbapi
4266                         #               (db, pkg_type, built, installed, db_keys)
4267                         if "--usepkgonly" not in self.myopts:
4268                                 db_keys = list(portdb._aux_cache_keys)
4269                                 dbs.append((portdb, "ebuild", False, False, db_keys))
4270                         if "--usepkg" in self.myopts:
4271                                 db_keys = list(bindb._aux_cache_keys)
4272                                 dbs.append((bindb,  "binary", True, False, db_keys))
4273                         db_keys = list(trees[myroot]["vartree"].dbapi._aux_cache_keys)
4274                         dbs.append((vardb, "installed", True, True, db_keys))
4275                         self._filtered_trees[myroot]["dbs"] = dbs
4276                         if "--usepkg" in self.myopts:
4277                                 self.trees[myroot]["bintree"].populate(
4278                                         "--getbinpkg" in self.myopts,
4279                                         "--getbinpkgonly" in self.myopts)
4280                 del trees
4281
4282                 self.digraph=portage.digraph()
4283                 # contains all sets added to the graph
4284                 self._sets = {}
4285                 # contains atoms given as arguments
4286                 self._sets["args"] = InternalPackageSet()
4287                 # contains all atoms from all sets added to the graph, including
4288                 # atoms given as arguments
4289                 self._set_atoms = InternalPackageSet()
4290                 self._atom_arg_map = {}
4291                 # contains all nodes pulled in by self._set_atoms
4292                 self._set_nodes = set()
4293                 # Contains only Blocker -> Uninstall edges
4294                 self._blocker_uninstalls = digraph()
4295                 # Contains only Package -> Blocker edges
4296                 self._blocker_parents = digraph()
4297                 # Contains only irrelevant Package -> Blocker edges
4298                 self._irrelevant_blockers = digraph()
4299                 # Contains only unsolvable Package -> Blocker edges
4300                 self._unsolvable_blockers = digraph()
4301                 self._slot_collision_info = set()
4302                 # Slot collision nodes are not allowed to block other packages since
4303                 # blocker validation is only able to account for one package per slot.
4304                 self._slot_collision_nodes = set()
4305                 self._serialized_tasks_cache = None
4306                 self._scheduler_graph = None
4307                 self._displayed_list = None
4308                 self._pprovided_args = []
4309                 self._missing_args = []
4310                 self._masked_installed = set()
4311                 self._unsatisfied_deps_for_display = []
4312                 self._unsatisfied_blockers_for_display = None
4313                 self._circular_deps_for_display = None
4314                 self._dep_stack = []
4315                 self._unsatisfied_deps = []
4316                 self._initially_unsatisfied_deps = []
4317                 self._ignored_deps = []
4318                 self._required_set_names = set(["system", "world"])
4319                 self._select_atoms = self._select_atoms_highest_available
4320                 self._select_package = self._select_pkg_highest_available
4321                 self._highest_pkg_cache = {}
4322
4323         def _show_slot_collision_notice(self):
4324                 """Show an informational message advising the user to mask one of the
4325                 the packages. In some cases it may be possible to resolve this
4326                 automatically, but support for backtracking (removal nodes that have
4327                 already been selected) will be required in order to handle all possible
4328                 cases."""
4329
4330                 if not self._slot_collision_info:
4331                         return
4332
4333                 self._show_merge_list()
4334
4335                 msg = []
4336                 msg.append("\n!!! Multiple package instances within a single " + \
4337                         "package slot have been pulled\n")
4338                 msg.append("!!! into the dependency graph, resulting" + \
4339                         " in a slot conflict:\n\n")
4340                 indent = "  "
4341                 # Max number of parents shown, to avoid flooding the display.
4342                 max_parents = 3
4343                 for slot_atom, root in self._slot_collision_info:
4344                         msg.append(str(slot_atom))
4345                         msg.append("\n\n")
4346                         slot_nodes = []
4347                         for node in self._slot_collision_nodes:
4348                                 if node.slot_atom == slot_atom:
4349                                         slot_nodes.append(node)
4350                         slot_nodes.append(self._slot_pkg_map[root][slot_atom])
4351                         for node in slot_nodes:
4352                                 msg.append(indent)
4353                                 msg.append(str(node))
4354                                 parents = self.digraph.parent_nodes(node)
4355                                 if parents:
4356                                         omitted_parents = 0
4357                                         if len(parents) > max_parents:
4358                                                 pruned_list = []
4359                                                 # When generating the pruned list, prefer instances
4360                                                 # of DependencyArg over instances of Package.
4361                                                 for parent in parents:
4362                                                         if isinstance(parent, DependencyArg):
4363                                                                 pruned_list.append(parent)
4364                                                 # Prefer Packages instances that themselves have been
4365                                                 # pulled into collision slots.
4366                                                 for parent in parents:
4367                                                         if isinstance(parent, Package) and \
4368                                                                 (parent.slot_atom, parent.root) \
4369                                                                 in self._slot_collision_info:
4370                                                                 pruned_list.append(parent)
4371                                                 for parent in parents:
4372                                                         if len(pruned_list) >= max_parents:
4373                                                                 break
4374                                                         if not isinstance(parent, DependencyArg) and \
4375                                                                 parent not in pruned_list:
4376                                                                 pruned_list.append(parent)
4377                                                 omitted_parents = len(parents) - len(pruned_list)
4378                                                 parents = pruned_list
4379                                         msg.append(" pulled in by\n")
4380                                         for parent in parents:
4381                                                 msg.append(2*indent)
4382                                                 msg.append(str(parent))
4383                                                 msg.append("\n")
4384                                         if omitted_parents:
4385                                                 msg.append(2*indent)
4386                                                 msg.append("(and %d more)\n" % omitted_parents)
4387                                 else:
4388                                         msg.append(" (no parents)\n")
4389                                 msg.append("\n")
4390                 msg.append("\n")
4391                 sys.stderr.write("".join(msg))
4392                 sys.stderr.flush()
4393
4394                 if "--quiet" in self.myopts:
4395                         return
4396
4397                 msg = []
4398                 msg.append("It may be possible to solve this problem ")
4399                 msg.append("by using package.mask to prevent one of ")
4400                 msg.append("those packages from being selected. ")
4401                 msg.append("However, it is also possible that conflicting ")
4402                 msg.append("dependencies exist such that they are impossible to ")
4403                 msg.append("satisfy simultaneously.  If such a conflict exists in ")
4404                 msg.append("the dependencies of two different packages, then those ")
4405                 msg.append("packages can not be installed simultaneously.")
4406
4407                 from formatter import AbstractFormatter, DumbWriter
4408                 f = AbstractFormatter(DumbWriter(sys.stderr, maxcol=72))
4409                 for x in msg:
4410                         f.add_flowing_data(x)
4411                 f.end_paragraph(1)
4412
4413                 msg = []
4414                 msg.append("For more information, see MASKED PACKAGES ")
4415                 msg.append("section in the emerge man page or refer ")
4416                 msg.append("to the Gentoo Handbook.")
4417                 for x in msg:
4418                         f.add_flowing_data(x)
4419                 f.end_paragraph(1)
4420                 f.writer.flush()
4421
4422         def _reinstall_for_flags(self, forced_flags,
4423                 orig_use, orig_iuse, cur_use, cur_iuse):
4424                 """Return a set of flags that trigger reinstallation, or None if there
4425                 are no such flags."""
4426                 if "--newuse" in self.myopts:
4427                         flags = set(orig_iuse.symmetric_difference(
4428                                 cur_iuse).difference(forced_flags))
4429                         flags.update(orig_iuse.intersection(orig_use).symmetric_difference(
4430                                 cur_iuse.intersection(cur_use)))
4431                         if flags:
4432                                 return flags
4433                 elif "changed-use" == self.myopts.get("--reinstall"):
4434                         flags = orig_iuse.intersection(orig_use).symmetric_difference(
4435                                 cur_iuse.intersection(cur_use))
4436                         if flags:
4437                                 return flags
4438                 return None
4439
4440         def _create_graph(self, allow_unsatisfied=False):
4441                 dep_stack = self._dep_stack
4442                 while dep_stack:
4443                         self.spinner.update()
4444                         dep = dep_stack.pop()
4445                         if isinstance(dep, Package):
4446                                 if not self._add_pkg_deps(dep,
4447                                         allow_unsatisfied=allow_unsatisfied):
4448                                         return 0
4449                                 continue
4450                         if not self._add_dep(dep, allow_unsatisfied=allow_unsatisfied):
4451                                 return 0
4452                 return 1
4453
4454         def _add_dep(self, dep, allow_unsatisfied=False):
4455                 debug = "--debug" in self.myopts
4456                 buildpkgonly = "--buildpkgonly" in self.myopts
4457                 nodeps = "--nodeps" in self.myopts
4458                 empty = "empty" in self.myparams
4459                 deep = "deep" in self.myparams
4460                 update = "--update" in self.myopts and dep.depth <= 1
4461                 if dep.blocker:
4462                         if not buildpkgonly and \
4463                                 not nodeps and \
4464                                 dep.parent not in self._slot_collision_nodes:
4465                                 if dep.parent.onlydeps:
4466                                         # It's safe to ignore blockers if the
4467                                         # parent is an --onlydeps node.
4468                                         return 1
4469                                 # The blocker applies to the root where
4470                                 # the parent is or will be installed.
4471                                 blocker = Blocker(atom=dep.atom,
4472                                         eapi=dep.parent.metadata["EAPI"],
4473                                         root=dep.parent.root)
4474                                 self._blocker_parents.add(blocker, dep.parent)
4475                         return 1
4476                 dep_pkg, existing_node = self._select_package(dep.root, dep.atom,
4477                         onlydeps=dep.onlydeps)
4478                 if not dep_pkg:
4479                         if allow_unsatisfied:
4480                                 self._unsatisfied_deps.append(dep)
4481                                 return 1
4482                         self._unsatisfied_deps_for_display.append(
4483                                 ((dep.root, dep.atom), {"myparent":dep.parent}))
4484                         return 0
4485                 # In some cases, dep_check will return deps that shouldn't
4486                 # be proccessed any further, so they are identified and
4487                 # discarded here. Try to discard as few as possible since
4488                 # discarded dependencies reduce the amount of information
4489                 # available for optimization of merge order.
4490                 if dep.priority.satisfied and \
4491                         not (existing_node or empty or deep or update):
4492                         myarg = None
4493                         if dep.root == self.target_root:
4494                                 try:
4495                                         myarg = self._iter_atoms_for_pkg(dep_pkg).next()
4496                                 except StopIteration:
4497                                         pass
4498                                 except portage.exception.InvalidDependString:
4499                                         if not dep_pkg.installed:
4500                                                 # This shouldn't happen since the package
4501                                                 # should have been masked.
4502                                                 raise
4503                         if not myarg:
4504                                 self._ignored_deps.append(dep)
4505                                 return 1
4506
4507                 if not self._add_pkg(dep_pkg, dep):
4508                         return 0
4509                 return 1
4510
4511         def _add_pkg(self, pkg, dep):
4512                 myparent = None
4513                 priority = None
4514                 depth = 0
4515                 if dep is None:
4516                         dep = Dependency()
4517                 else:
4518                         myparent = dep.parent
4519                         priority = dep.priority
4520                         depth = dep.depth
4521                 if priority is None:
4522                         priority = DepPriority()
4523                 """
4524                 Fills the digraph with nodes comprised of packages to merge.
4525                 mybigkey is the package spec of the package to merge.
4526                 myparent is the package depending on mybigkey ( or None )
4527                 addme = Should we add this package to the digraph or are we just looking at it's deps?
4528                         Think --onlydeps, we need to ignore packages in that case.
4529                 #stuff to add:
4530                 #SLOT-aware emerge
4531                 #IUSE-aware emerge -> USE DEP aware depgraph
4532                 #"no downgrade" emerge
4533                 """
4534
4535                 # select the correct /var database that we'll be checking against
4536                 vardbapi = self.trees[pkg.root]["vartree"].dbapi
4537                 pkgsettings = self.pkgsettings[pkg.root]
4538
4539                 args = None
4540                 arg_atoms = None
4541                 if True:
4542                         try:
4543                                 arg_atoms = list(self._iter_atoms_for_pkg(pkg))
4544                         except portage.exception.InvalidDependString, e:
4545                                 if not pkg.installed:
4546                                         show_invalid_depstring_notice(
4547                                                 pkg, pkg.metadata["PROVIDE"], str(e))
4548                                         return 0
4549                                 del e
4550                         else:
4551                                 args = [arg for arg, atom in arg_atoms]
4552
4553                 if not pkg.onlydeps:
4554                         if not pkg.installed and \
4555                                 "empty" not in self.myparams and \
4556                                 vardbapi.match(pkg.slot_atom):
4557                                 # Increase the priority of dependencies on packages that
4558                                 # are being rebuilt. This optimizes merge order so that
4559                                 # dependencies are rebuilt/updated as soon as possible,
4560                                 # which is needed especially when emerge is called by
4561                                 # revdep-rebuild since dependencies may be affected by ABI
4562                                 # breakage that has rendered them useless. Don't adjust
4563                                 # priority here when in "empty" mode since all packages
4564                                 # are being merged in that case.
4565                                 priority.rebuild = True
4566
4567                         existing_node = self._slot_pkg_map[pkg.root].get(pkg.slot_atom)
4568                         slot_collision = False
4569                         if existing_node:
4570                                 existing_node_matches = pkg.cpv == existing_node.cpv
4571                                 if existing_node_matches and \
4572                                         pkg != existing_node and \
4573                                         dep.atom is not None:
4574                                         # Use package set for matching since it will match via
4575                                         # PROVIDE when necessary, while match_from_list does not.
4576                                         atom_set = InternalPackageSet(initial_atoms=[dep.atom])
4577                                         if not atom_set.findAtomForPackage(existing_node):
4578                                                 existing_node_matches = False
4579                                 if existing_node_matches:
4580                                         # The existing node can be reused.
4581                                         if args:
4582                                                 for arg in args:
4583                                                         self.digraph.add(existing_node, arg,
4584                                                                 priority=priority)
4585                                         # If a direct circular dependency is not an unsatisfied
4586                                         # buildtime dependency then drop it here since otherwise
4587                                         # it can skew the merge order calculation in an unwanted
4588                                         # way.
4589                                         if existing_node != myparent or \
4590                                                 (priority.buildtime and not priority.satisfied):
4591                                                 self.digraph.addnode(existing_node, myparent,
4592                                                         priority=priority)
4593                                         return 1
4594                                 else:
4595
4596                                         if pkg.cpv == existing_node.cpv and \
4597                                                 dep.atom is not None and \
4598                                                 dep.atom.use:
4599                                                 # Multiple different instances of the same version
4600                                                 # (typically one installed and another not yet
4601                                                 # installed) have been pulled into the graph due
4602                                                 # to a USE dependency. The "slot collision" display
4603                                                 # is not helpful in a case like this, so display it
4604                                                 # as an unsatisfied dependency.
4605                                                 self._unsatisfied_deps_for_display.append(
4606                                                         ((dep.root, dep.atom), {"myparent":dep.parent}))
4607                                                 self._slot_collision_info.add((pkg.slot_atom, pkg.root))
4608                                                 self._slot_collision_nodes.add(pkg)
4609                                                 self.digraph.addnode(pkg, myparent, priority=priority)
4610                                                 return 0
4611
4612                                         if pkg in self._slot_collision_nodes:
4613                                                 return 1
4614                                         # A slot collision has occurred.  Sometimes this coincides
4615                                         # with unresolvable blockers, so the slot collision will be
4616                                         # shown later if there are no unresolvable blockers.
4617                                         self._slot_collision_info.add((pkg.slot_atom, pkg.root))
4618                                         self._slot_collision_nodes.add(pkg)
4619                                         slot_collision = True
4620
4621                         if slot_collision:
4622                                 # Now add this node to the graph so that self.display()
4623                                 # can show use flags and --tree portage.output.  This node is
4624                                 # only being partially added to the graph.  It must not be
4625                                 # allowed to interfere with the other nodes that have been
4626                                 # added.  Do not overwrite data for existing nodes in
4627                                 # self.mydbapi since that data will be used for blocker
4628                                 # validation.
4629                                 # Even though the graph is now invalid, continue to process
4630                                 # dependencies so that things like --fetchonly can still
4631                                 # function despite collisions.
4632                                 pass
4633                         else:
4634                                 self._slot_pkg_map[pkg.root][pkg.slot_atom] = pkg
4635                                 self.mydbapi[pkg.root].cpv_inject(pkg)
4636
4637                         self.digraph.addnode(pkg, myparent, priority=priority)
4638
4639                         if not pkg.installed:
4640                                 # Allow this package to satisfy old-style virtuals in case it
4641                                 # doesn't already. Any pre-existing providers will be preferred
4642                                 # over this one.
4643                                 try:
4644                                         pkgsettings.setinst(pkg.cpv, pkg.metadata)
4645                                         # For consistency, also update the global virtuals.
4646                                         settings = self.roots[pkg.root].settings
4647                                         settings.unlock()
4648                                         settings.setinst(pkg.cpv, pkg.metadata)
4649                                         settings.lock()
4650                                 except portage.exception.InvalidDependString, e:
4651                                         show_invalid_depstring_notice(
4652                                                 pkg, pkg.metadata["PROVIDE"], str(e))
4653                                         del e
4654                                         return 0
4655
4656                 if args:
4657                         self._set_nodes.add(pkg)
4658
4659                 # Do this even when addme is False (--onlydeps) so that the
4660                 # parent/child relationship is always known in case
4661                 # self._show_slot_collision_notice() needs to be called later.
4662                 if pkg.onlydeps:
4663                         self.digraph.add(pkg, myparent, priority=priority)
4664                 if args:
4665                         for arg in args:
4666                                 self.digraph.add(pkg, arg, priority=priority)
4667
4668                 """ This section determines whether we go deeper into dependencies or not.
4669                     We want to go deeper on a few occasions:
4670                     Installing package A, we need to make sure package A's deps are met.
4671                     emerge --deep <pkgspec>; we need to recursively check dependencies of pkgspec
4672                     If we are in --nodeps (no recursion) mode, we obviously only check 1 level of dependencies.
4673                 """
4674                 dep_stack = self._dep_stack
4675                 if "recurse" not in self.myparams:
4676                         return 1
4677                 elif pkg.installed and \
4678                         "deep" not in self.myparams:
4679                         dep_stack = self._ignored_deps
4680
4681                 self.spinner.update()
4682
4683                 if args:
4684                         depth = 0
4685                 pkg.depth = depth
4686                 dep_stack.append(pkg)
4687                 return 1
4688
4689         def _add_pkg_deps(self, pkg, allow_unsatisfied=False):
4690
4691                 mytype = pkg.type_name
4692                 myroot = pkg.root
4693                 mykey = pkg.cpv
4694                 metadata = pkg.metadata
4695                 myuse = pkg.use.enabled
4696                 jbigkey = pkg
4697                 depth = pkg.depth + 1
4698                 removal_action = "remove" in self.myparams
4699
4700                 edepend={}
4701                 depkeys = ["DEPEND","RDEPEND","PDEPEND"]
4702                 for k in depkeys:
4703                         edepend[k] = metadata[k]
4704
4705                 if not pkg.built and \
4706                         "--buildpkgonly" in self.myopts and \
4707                         "deep" not in self.myparams and \
4708                         "empty" not in self.myparams:
4709                         edepend["RDEPEND"] = ""
4710                         edepend["PDEPEND"] = ""
4711                 bdeps_satisfied = False
4712                 
4713                 if pkg.built and not removal_action:
4714                         if self.myopts.get("--with-bdeps", "n") == "y":
4715                                 # Pull in build time deps as requested, but marked them as
4716                                 # "satisfied" since they are not strictly required. This allows
4717                                 # more freedom in the merge order calculation for solving
4718                                 # circular dependencies. Don't convert to PDEPEND since that
4719                                 # could make --with-bdeps=y less effective if it is used to
4720                                 # adjust merge order to prevent built_with_use() calls from
4721                                 # failing.
4722                                 bdeps_satisfied = True
4723                         else:
4724                                 # built packages do not have build time dependencies.
4725                                 edepend["DEPEND"] = ""
4726
4727                 if removal_action and self.myopts.get("--with-bdeps", "y") == "n":
4728                         edepend["DEPEND"] = ""
4729
4730                 deps = (
4731                         ("/", edepend["DEPEND"],
4732                                 self._priority(buildtime=True, satisfied=bdeps_satisfied)),
4733                         (myroot, edepend["RDEPEND"], self._priority(runtime=True)),
4734                         (myroot, edepend["PDEPEND"], self._priority(runtime_post=True))
4735                 )
4736
4737                 debug = "--debug" in self.myopts
4738                 strict = mytype != "installed"
4739                 try:
4740                         for dep_root, dep_string, dep_priority in deps:
4741                                 if pkg.onlydeps:
4742                                         # Decrease priority so that --buildpkgonly
4743                                         # hasallzeros() works correctly.
4744                                         dep_priority = DepPriority()
4745                                 if not dep_string:
4746                                         continue
4747                                 if debug:
4748                                         print
4749                                         print "Parent:   ", jbigkey
4750                                         print "Depstring:", dep_string
4751                                         print "Priority:", dep_priority
4752                                 vardb = self.roots[dep_root].trees["vartree"].dbapi
4753                                 try:
4754                                         selected_atoms = self._select_atoms(dep_root,
4755                                                 dep_string, myuse=myuse, parent=pkg, strict=strict)
4756                                 except portage.exception.InvalidDependString, e:
4757                                         show_invalid_depstring_notice(jbigkey, dep_string, str(e))
4758                                         return 0
4759                                 if debug:
4760                                         print "Candidates:", selected_atoms
4761
4762                                 for atom in selected_atoms:
4763                                         try:
4764
4765                                                 atom = portage.dep.Atom(atom)
4766
4767                                                 mypriority = dep_priority.copy()
4768                                                 if not atom.blocker and vardb.match(atom):
4769                                                         mypriority.satisfied = True
4770
4771                                                 if not self._add_dep(Dependency(atom=atom,
4772                                                         blocker=atom.blocker, depth=depth, parent=pkg,
4773                                                         priority=mypriority, root=dep_root),
4774                                                         allow_unsatisfied=allow_unsatisfied):
4775                                                         return 0
4776
4777                                         except portage.exception.InvalidAtom, e:
4778                                                 show_invalid_depstring_notice(
4779                                                         pkg, dep_string, str(e))
4780                                                 del e
4781                                                 if not pkg.installed:
4782                                                         return 0
4783
4784                                 if debug:
4785                                         print "Exiting...", jbigkey
4786                 except portage.exception.AmbiguousPackageName, e:
4787                         pkgs = e.args[0]
4788                         portage.writemsg("\n\n!!! An atom in the dependencies " + \
4789                                 "is not fully-qualified. Multiple matches:\n\n", noiselevel=-1)
4790                         for cpv in pkgs:
4791                                 portage.writemsg("    %s\n" % cpv, noiselevel=-1)
4792                         portage.writemsg("\n", noiselevel=-1)
4793                         if mytype == "binary":
4794                                 portage.writemsg(
4795                                         "!!! This binary package cannot be installed: '%s'\n" % \
4796                                         mykey, noiselevel=-1)
4797                         elif mytype == "ebuild":
4798                                 portdb = self.roots[myroot].trees["porttree"].dbapi
4799                                 myebuild, mylocation = portdb.findname2(mykey)
4800                                 portage.writemsg("!!! This ebuild cannot be installed: " + \
4801                                         "'%s'\n" % myebuild, noiselevel=-1)
4802                         portage.writemsg("!!! Please notify the package maintainer " + \
4803                                 "that atoms must be fully-qualified.\n", noiselevel=-1)
4804                         return 0
4805                 return 1
4806
4807         def _priority(self, **kwargs):
4808                 if "remove" in self.myparams:
4809                         priority_constructor = UnmergeDepPriority
4810                 else:
4811                         priority_constructor = DepPriority
4812                 return priority_constructor(**kwargs)
4813
4814         def _dep_expand(self, root_config, atom_without_category):
4815                 """
4816                 @param root_config: a root config instance
4817                 @type root_config: RootConfig
4818                 @param atom_without_category: an atom without a category component
4819                 @type atom_without_category: String
4820                 @rtype: list
4821                 @returns: a list of atoms containing categories (possibly empty)
4822                 """
4823                 null_cp = portage.dep_getkey(insert_category_into_atom(
4824                         atom_without_category, "null"))
4825                 cat, atom_pn = portage.catsplit(null_cp)
4826
4827                 cp_set = set()
4828                 for db, pkg_type, built, installed, db_keys in \
4829                         self._filtered_trees[root_config.root]["dbs"]:
4830                         cp_set.update(db.cp_all())
4831                 for cp in list(cp_set):
4832                         cat, pn = portage.catsplit(cp)
4833                         if pn != atom_pn:
4834                                 cp_set.discard(cp)
4835                 deps = []
4836                 for cp in cp_set:
4837                         cat, pn = portage.catsplit(cp)
4838                         deps.append(insert_category_into_atom(
4839                                 atom_without_category, cat))
4840                 return deps
4841
4842         def _have_new_virt(self, root, atom_cp):
4843                 ret = False
4844                 for db, pkg_type, built, installed, db_keys in \
4845                         self._filtered_trees[root]["dbs"]:
4846                         if db.cp_list(atom_cp):
4847                                 ret = True
4848                                 break
4849                 return ret
4850
4851         def _iter_atoms_for_pkg(self, pkg):
4852                 # TODO: add multiple $ROOT support
4853                 if pkg.root != self.target_root:
4854                         return
4855                 atom_arg_map = self._atom_arg_map
4856                 root_config = self.roots[pkg.root]
4857                 for atom in self._set_atoms.iterAtomsForPackage(pkg):
4858                         atom_cp = portage.dep_getkey(atom)
4859                         if atom_cp != pkg.cp and \
4860                                 self._have_new_virt(pkg.root, atom_cp):
4861                                 continue
4862                         visible_pkgs = root_config.visible_pkgs.match_pkgs(atom)
4863                         visible_pkgs.reverse() # descending order
4864                         higher_slot = None
4865                         for visible_pkg in visible_pkgs:
4866                                 if visible_pkg.cp != atom_cp:
4867                                         continue
4868                                 if pkg >= visible_pkg:
4869                                         # This is descending order, and we're not
4870                                         # interested in any versions <= pkg given.
4871                                         break
4872                                 if pkg.slot_atom != visible_pkg.slot_atom:
4873                                         higher_slot = visible_pkg
4874                                         break
4875                         if higher_slot is not None:
4876                                 continue
4877                         for arg in atom_arg_map[(atom, pkg.root)]:
4878                                 if isinstance(arg, PackageArg) and \
4879                                         arg.package != pkg:
4880                                         continue
4881                                 yield arg, atom
4882
4883         def select_files(self, myfiles):
4884                 """Given a list of .tbz2s, .ebuilds sets, and deps, create the
4885                 appropriate depgraph and return a favorite list."""
4886                 debug = "--debug" in self.myopts
4887                 root_config = self.roots[self.target_root]
4888                 sets = root_config.sets
4889                 getSetAtoms = root_config.setconfig.getSetAtoms
4890                 myfavorites=[]
4891                 myroot = self.target_root
4892                 dbs = self._filtered_trees[myroot]["dbs"]
4893                 vardb = self.trees[myroot]["vartree"].dbapi
4894                 real_vardb = self._trees_orig[myroot]["vartree"].dbapi
4895                 portdb = self.trees[myroot]["porttree"].dbapi
4896                 bindb = self.trees[myroot]["bintree"].dbapi
4897                 pkgsettings = self.pkgsettings[myroot]
4898                 args = []
4899                 onlydeps = "--onlydeps" in self.myopts
4900                 lookup_owners = []
4901                 for x in myfiles:
4902                         ext = os.path.splitext(x)[1]
4903                         if ext==".tbz2":
4904                                 if not os.path.exists(x):
4905                                         if os.path.exists(
4906                                                 os.path.join(pkgsettings["PKGDIR"], "All", x)):
4907                                                 x = os.path.join(pkgsettings["PKGDIR"], "All", x)
4908                                         elif os.path.exists(
4909                                                 os.path.join(pkgsettings["PKGDIR"], x)):
4910                                                 x = os.path.join(pkgsettings["PKGDIR"], x)
4911                                         else:
4912                                                 print "\n\n!!! Binary package '"+str(x)+"' does not exist."
4913                                                 print "!!! Please ensure the tbz2 exists as specified.\n"
4914                                                 return 0, myfavorites
4915                                 mytbz2=portage.xpak.tbz2(x)
4916                                 mykey=mytbz2.getelements("CATEGORY")[0]+"/"+os.path.splitext(os.path.basename(x))[0]
4917                                 if os.path.realpath(x) != \
4918                                         os.path.realpath(self.trees[myroot]["bintree"].getname(mykey)):
4919                                         print colorize("BAD", "\n*** You need to adjust PKGDIR to emerge this package.\n")
4920                                         return 0, myfavorites
4921                                 db_keys = list(bindb._aux_cache_keys)
4922                                 metadata = izip(db_keys, bindb.aux_get(mykey, db_keys))
4923                                 pkg = Package(type_name="binary", root_config=root_config,
4924                                         cpv=mykey, built=True, metadata=metadata,
4925                                         onlydeps=onlydeps)
4926                                 self._pkg_cache[pkg] = pkg
4927                                 args.append(PackageArg(arg=x, package=pkg,
4928                                         root_config=root_config))
4929                         elif ext==".ebuild":
4930                                 ebuild_path = portage.util.normalize_path(os.path.abspath(x))
4931                                 pkgdir = os.path.dirname(ebuild_path)
4932                                 tree_root = os.path.dirname(os.path.dirname(pkgdir))
4933                                 cp = pkgdir[len(tree_root)+1:]
4934                                 e = portage.exception.PackageNotFound(
4935                                         ("%s is not in a valid portage tree " + \
4936                                         "hierarchy or does not exist") % x)
4937                                 if not portage.isvalidatom(cp):
4938                                         raise e
4939                                 cat = portage.catsplit(cp)[0]
4940                                 mykey = cat + "/" + os.path.basename(ebuild_path[:-7])
4941                                 if not portage.isvalidatom("="+mykey):
4942                                         raise e
4943                                 ebuild_path = portdb.findname(mykey)
4944                                 if ebuild_path:
4945                                         if ebuild_path != os.path.join(os.path.realpath(tree_root),
4946                                                 cp, os.path.basename(ebuild_path)):
4947                                                 print colorize("BAD", "\n*** You need to adjust PORTDIR or PORTDIR_OVERLAY to emerge this package.\n")
4948                                                 return 0, myfavorites
4949                                         if mykey not in portdb.xmatch(
4950                                                 "match-visible", portage.dep_getkey(mykey)):
4951                                                 print colorize("BAD", "\n*** You are emerging a masked package. It is MUCH better to use")
4952                                                 print colorize("BAD", "*** /etc/portage/package.* to accomplish this. See portage(5) man")
4953                                                 print colorize("BAD", "*** page for details.")
4954                                                 countdown(int(self.settings["EMERGE_WARNING_DELAY"]),
4955                                                         "Continuing...")
4956                                 else:
4957                                         raise portage.exception.PackageNotFound(
4958                                                 "%s is not in a valid portage tree hierarchy or does not exist" % x)
4959                                 db_keys = list(portdb._aux_cache_keys)
4960                                 metadata = izip(db_keys, portdb.aux_get(mykey, db_keys))
4961                                 pkg = Package(type_name="ebuild", root_config=root_config,
4962                                         cpv=mykey, metadata=metadata, onlydeps=onlydeps)
4963                                 pkgsettings.setcpv(pkg)
4964                                 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
4965                                 self._pkg_cache[pkg] = pkg
4966                                 args.append(PackageArg(arg=x, package=pkg,
4967                                         root_config=root_config))
4968                         elif x.startswith(os.path.sep):
4969                                 if not x.startswith(myroot):
4970                                         portage.writemsg(("\n\n!!! '%s' does not start with" + \
4971                                                 " $ROOT.\n") % x, noiselevel=-1)
4972                                         return 0, []
4973                                 # Queue these up since it's most efficient to handle
4974                                 # multiple files in a single iter_owners() call.
4975                                 lookup_owners.append(x)
4976                         else:
4977                                 if x in ("system", "world"):
4978                                         x = SETPREFIX + x
4979                                 if x.startswith(SETPREFIX):
4980                                         s = x[len(SETPREFIX):]
4981                                         if s not in sets:
4982                                                 raise portage.exception.PackageSetNotFound(s)
4983                                         if s in self._sets:
4984                                                 continue
4985                                         # Recursively expand sets so that containment tests in
4986                                         # self._get_parent_sets() properly match atoms in nested
4987                                         # sets (like if world contains system).
4988                                         expanded_set = InternalPackageSet(
4989                                                 initial_atoms=getSetAtoms(s))
4990                                         self._sets[s] = expanded_set
4991                                         args.append(SetArg(arg=x, set=expanded_set,
4992                                                 root_config=root_config))
4993                                         myfavorites.append(x)
4994                                         continue
4995                                 if not is_valid_package_atom(x):
4996                                         portage.writemsg("\n\n!!! '%s' is not a valid package atom.\n" % x,
4997                                                 noiselevel=-1)
4998                                         portage.writemsg("!!! Please check ebuild(5) for full details.\n")
4999                                         portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
5000                                         return (0,[])
5001                                 # Don't expand categories or old-style virtuals here unless
5002                                 # necessary. Expansion of old-style virtuals here causes at
5003                                 # least the following problems:
5004                                 #   1) It's more difficult to determine which set(s) an atom
5005                                 #      came from, if any.
5006                                 #   2) It takes away freedom from the resolver to choose other
5007                                 #      possible expansions when necessary.
5008                                 if "/" in x:
5009                                         args.append(AtomArg(arg=x, atom=x,
5010                                                 root_config=root_config))
5011                                         continue
5012                                 expanded_atoms = self._dep_expand(root_config, x)
5013                                 installed_cp_set = set()
5014                                 for atom in expanded_atoms:
5015                                         atom_cp = portage.dep_getkey(atom)
5016                                         if vardb.cp_list(atom_cp):
5017                                                 installed_cp_set.add(atom_cp)
5018                                 if len(expanded_atoms) > 1 and len(installed_cp_set) == 1:
5019                                         installed_cp = iter(installed_cp_set).next()
5020                                         expanded_atoms = [atom for atom in expanded_atoms \
5021                                                 if portage.dep_getkey(atom) == installed_cp]
5022
5023                                 if len(expanded_atoms) > 1:
5024                                         print
5025                                         print
5026                                         ambiguous_package_name(x, expanded_atoms, root_config,
5027                                                 self.spinner, self.myopts)
5028                                         return False, myfavorites
5029                                 if expanded_atoms:
5030                                         atom = expanded_atoms[0]
5031                                 else:
5032                                         null_atom = insert_category_into_atom(x, "null")
5033                                         null_cp = portage.dep_getkey(null_atom)
5034                                         cat, atom_pn = portage.catsplit(null_cp)
5035                                         virts_p = root_config.settings.get_virts_p().get(atom_pn)
5036                                         if virts_p:
5037                                                 # Allow the depgraph to choose which virtual.
5038                                                 atom = insert_category_into_atom(x, "virtual")
5039                                         else:
5040                                                 atom = insert_category_into_atom(x, "null")
5041
5042                                 args.append(AtomArg(arg=x, atom=atom,
5043                                         root_config=root_config))
5044
5045                 if lookup_owners:
5046                         relative_paths = []
5047                         search_for_multiple = False
5048                         if len(lookup_owners) > 1:
5049                                 search_for_multiple = True
5050
5051                         for x in lookup_owners:
5052                                 if not search_for_multiple and os.path.isdir(x):
5053                                         search_for_multiple = True
5054                                 relative_paths.append(x[len(myroot):])
5055
5056                         owners = set()
5057                         for pkg, relative_path in \
5058                                 real_vardb._owners.iter_owners(relative_paths):
5059                                 owners.add(pkg.mycpv)
5060                                 if not search_for_multiple:
5061                                         break
5062
5063                         if not owners:
5064                                 portage.writemsg(("\n\n!!! '%s' is not claimed " + \
5065                                         "by any package.\n") % lookup_owners[0], noiselevel=-1)
5066                                 return 0, []
5067
5068                         for cpv in owners:
5069                                 slot = vardb.aux_get(cpv, ["SLOT"])[0]
5070                                 if not slot:
5071                                         # portage now masks packages with missing slot, but it's
5072                                         # possible that one was installed by an older version
5073                                         atom = portage.cpv_getkey(cpv)
5074                                 else:
5075                                         atom = "%s:%s" % (portage.cpv_getkey(cpv), slot)
5076                                 args.append(AtomArg(arg=atom, atom=atom,
5077                                         root_config=root_config))
5078
5079                 if "--update" in self.myopts:
5080                         # Enable greedy SLOT atoms for atoms given as arguments.
5081                         # This is currently disabled for sets since greedy SLOT
5082                         # atoms could be a property of the set itself.
5083                         greedy_atoms = []
5084                         for arg in args:
5085                                 # In addition to any installed slots, also try to pull
5086                                 # in the latest new slot that may be available.
5087                                 greedy_atoms.append(arg)
5088                                 if not isinstance(arg, (AtomArg, PackageArg)):
5089                                         continue
5090                                 atom_cp = portage.dep_getkey(arg.atom)
5091                                 slots = set()
5092                                 for cpv in vardb.match(arg.atom):
5093                                         slots.add(vardb.aux_get(cpv, ["SLOT"])[0])
5094                                 for slot in slots:
5095                                         greedy_atoms.append(
5096                                                 AtomArg(arg=arg.arg, atom="%s:%s" % (atom_cp, slot),
5097                                                         root_config=root_config))
5098                         args = greedy_atoms
5099                         del greedy_atoms
5100
5101                 # Create the "args" package set from atoms and
5102                 # packages given as arguments.
5103                 args_set = self._sets["args"]
5104                 for arg in args:
5105                         if not isinstance(arg, (AtomArg, PackageArg)):
5106                                 continue
5107                         myatom = arg.atom
5108                         if myatom in args_set:
5109                                 continue
5110                         args_set.add(myatom)
5111                         myfavorites.append(myatom)
5112                 self._set_atoms.update(chain(*self._sets.itervalues()))
5113                 atom_arg_map = self._atom_arg_map
5114                 for arg in args:
5115                         for atom in arg.set:
5116                                 atom_key = (atom, myroot)
5117                                 refs = atom_arg_map.get(atom_key)
5118                                 if refs is None:
5119                                         refs = []
5120                                         atom_arg_map[atom_key] = refs
5121                                         if arg not in refs:
5122                                                 refs.append(arg)
5123                 pprovideddict = pkgsettings.pprovideddict
5124                 if debug:
5125                         portage.writemsg("\n", noiselevel=-1)
5126                 # Order needs to be preserved since a feature of --nodeps
5127                 # is to allow the user to force a specific merge order.
5128                 args.reverse()
5129                 while args:
5130                         arg = args.pop()
5131                         for atom in arg.set:
5132                                 self.spinner.update()
5133                                 dep = Dependency(atom=atom, onlydeps=onlydeps,
5134                                         root=myroot, parent=arg)
5135                                 atom_cp = portage.dep_getkey(atom)
5136                                 try:
5137                                         pprovided = pprovideddict.get(portage.dep_getkey(atom))
5138                                         if pprovided and portage.match_from_list(atom, pprovided):
5139                                                 # A provided package has been specified on the command line.
5140                                                 self._pprovided_args.append((arg, atom))
5141                                                 continue
5142                                         if isinstance(arg, PackageArg):
5143                                                 if not self._add_pkg(arg.package, dep) or \
5144                                                         not self._create_graph():
5145                                                         sys.stderr.write(("\n\n!!! Problem resolving " + \
5146                                                                 "dependencies for %s\n") % arg.arg)
5147                                                         return 0, myfavorites
5148                                                 continue
5149                                         if debug:
5150                                                 portage.writemsg("      Arg: %s\n     Atom: %s\n" % \
5151                                                         (arg, atom), noiselevel=-1)
5152                                         pkg, existing_node = self._select_package(
5153                                                 myroot, atom, onlydeps=onlydeps)
5154                                         if not pkg:
5155                                                 if not (isinstance(arg, SetArg) and \
5156                                                         arg.name in ("system", "world")):
5157                                                         self._unsatisfied_deps_for_display.append(
5158                                                                 ((myroot, atom), {}))
5159                                                         return 0, myfavorites
5160                                                 self._missing_args.append((arg, atom))
5161                                                 continue
5162                                         if atom_cp != pkg.cp:
5163                                                 # For old-style virtuals, we need to repeat the
5164                                                 # package.provided check against the selected package.
5165                                                 expanded_atom = atom.replace(atom_cp, pkg.cp)
5166                                                 pprovided = pprovideddict.get(pkg.cp)
5167                                                 if pprovided and \
5168                                                         portage.match_from_list(expanded_atom, pprovided):
5169                                                         # A provided package has been
5170                                                         # specified on the command line.
5171                                                         self._pprovided_args.append((arg, atom))
5172                                                         continue
5173                                         if pkg.installed and "selective" not in self.myparams:
5174                                                 self._unsatisfied_deps_for_display.append(
5175                                                         ((myroot, atom), {}))
5176                                                 # Previous behavior was to bail out in this case, but
5177                                                 # since the dep is satisfied by the installed package,
5178                                                 # it's more friendly to continue building the graph
5179                                                 # and just show a warning message. Therefore, only bail
5180                                                 # out here if the atom is not from either the system or
5181                                                 # world set.
5182                                                 if not (isinstance(arg, SetArg) and \
5183                                                         arg.name in ("system", "world")):
5184                                                         return 0, myfavorites
5185
5186                                         # Add the selected package to the graph as soon as possible
5187                                         # so that later dep_check() calls can use it as feedback
5188                                         # for making more consistent atom selections.
5189                                         if not self._add_pkg(pkg, dep):
5190                                                 if isinstance(arg, SetArg):
5191                                                         sys.stderr.write(("\n\n!!! Problem resolving " + \
5192                                                                 "dependencies for %s from %s\n") % \
5193                                                                 (atom, arg.arg))
5194                                                 else:
5195                                                         sys.stderr.write(("\n\n!!! Problem resolving " + \
5196                                                                 "dependencies for %s\n") % atom)
5197                                                 return 0, myfavorites
5198
5199                                 except portage.exception.MissingSignature, e:
5200                                         portage.writemsg("\n\n!!! A missing gpg signature is preventing portage from calculating the\n")
5201                                         portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
5202                                         portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
5203                                         portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
5204                                         portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
5205                                         return 0, myfavorites
5206                                 except portage.exception.InvalidSignature, e:
5207                                         portage.writemsg("\n\n!!! An invalid gpg signature is preventing portage from calculating the\n")
5208                                         portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
5209                                         portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
5210                                         portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
5211                                         portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
5212                                         return 0, myfavorites
5213                                 except SystemExit, e:
5214                                         raise # Needed else can't exit
5215                                 except Exception, e:
5216                                         print >> sys.stderr, "\n\n!!! Problem in '%s' dependencies." % atom
5217                                         print >> sys.stderr, "!!!", str(e), getattr(e, "__module__", None)
5218                                         raise
5219
5220                 # Now that the root packages have been added to the graph,
5221                 # process the dependencies.
5222                 if not self._create_graph():
5223                         return 0, myfavorites
5224
5225                 missing=0
5226                 if "--usepkgonly" in self.myopts:
5227                         for xs in self.digraph.all_nodes():
5228                                 if not isinstance(xs, Package):
5229                                         continue
5230                                 if len(xs) >= 4 and xs[0] != "binary" and xs[3] == "merge":
5231                                         if missing == 0:
5232                                                 print
5233                                         missing += 1
5234                                         print "Missing binary for:",xs[2]
5235
5236                 try:
5237                         self.altlist()
5238                 except self._unknown_internal_error:
5239                         return False, myfavorites
5240
5241                 # We're true here unless we are missing binaries.
5242                 return (not missing,myfavorites)
5243
5244         def _select_atoms_from_graph(self, *pargs, **kwargs):
5245                 """
5246                 Prefer atoms matching packages that have already been
5247                 added to the graph or those that are installed and have
5248                 not been scheduled for replacement.
5249                 """
5250                 kwargs["trees"] = self._graph_trees
5251                 return self._select_atoms_highest_available(*pargs, **kwargs)
5252
5253         def _select_atoms_highest_available(self, root, depstring,
5254                 myuse=None, parent=None, strict=True, trees=None):
5255                 """This will raise InvalidDependString if necessary. If trees is
5256                 None then self._filtered_trees is used."""
5257                 pkgsettings = self.pkgsettings[root]
5258                 if trees is None:
5259                         trees = self._filtered_trees
5260                 if True:
5261                         try:
5262                                 if parent is not None:
5263                                         trees[root]["parent"] = parent
5264                                 if not strict:
5265                                         portage.dep._dep_check_strict = False
5266                                 mycheck = portage.dep_check(depstring, None,
5267                                         pkgsettings, myuse=myuse,
5268                                         myroot=root, trees=trees)
5269                         finally:
5270                                 if parent is not None:
5271                                         trees[root].pop("parent")
5272                                 portage.dep._dep_check_strict = True
5273                         if not mycheck[0]:
5274                                 raise portage.exception.InvalidDependString(mycheck[1])
5275                         selected_atoms = mycheck[1]
5276                 return selected_atoms
5277
5278         def _show_unsatisfied_dep(self, root, atom, myparent=None, arg=None):
5279                 atom = portage.dep.Atom(atom)
5280                 atom_set = InternalPackageSet(initial_atoms=(atom,))
5281                 atom_without_use = atom
5282                 if atom.use:
5283                         atom_without_use = portage.dep.remove_slot(atom)
5284                         if atom.slot:
5285                                 atom_without_use += ":" + atom.slot
5286                         atom_without_use = portage.dep.Atom(atom_without_use)
5287                 xinfo = '"%s"' % atom
5288                 if arg:
5289                         xinfo='"%s"' % arg
5290                 # Discard null/ from failed cpv_expand category expansion.
5291                 xinfo = xinfo.replace("null/", "")
5292                 masked_packages = []
5293                 missing_use = []
5294                 missing_licenses = []
5295                 have_eapi_mask = False
5296                 pkgsettings = self.pkgsettings[root]
5297                 implicit_iuse = pkgsettings._get_implicit_iuse()
5298                 root_config = self.roots[root]
5299                 portdb = self.roots[root].trees["porttree"].dbapi
5300                 dbs = self._filtered_trees[root]["dbs"]
5301                 for db, pkg_type, built, installed, db_keys in dbs:
5302                         if installed:
5303                                 continue
5304                         match = db.match
5305                         if hasattr(db, "xmatch"):
5306                                 cpv_list = db.xmatch("match-all", atom_without_use)
5307                         else:
5308                                 cpv_list = db.match(atom_without_use)
5309                         # descending order
5310                         cpv_list.reverse()
5311                         for cpv in cpv_list:
5312                                 metadata, mreasons  = get_mask_info(root_config, cpv,
5313                                         pkgsettings, db, pkg_type, built, installed, db_keys)
5314                                 if metadata is not None:
5315                                         pkg = Package(built=built, cpv=cpv,
5316                                                 installed=installed, metadata=metadata,
5317                                                 root_config=root_config)
5318                                         if pkg.cp != atom.cp:
5319                                                 # A cpv can be returned from dbapi.match() as an
5320                                                 # old-style virtual match even in cases when the
5321                                                 # package does not actually PROVIDE the virtual.
5322                                                 # Filter out any such false matches here.
5323                                                 if not atom_set.findAtomForPackage(pkg):
5324                                                         continue
5325                                         if atom.use and not mreasons:
5326                                                 missing_use.append(pkg)
5327                                                 continue
5328                                 masked_packages.append(
5329                                         (root_config, pkgsettings, cpv, metadata, mreasons))
5330
5331                 missing_use_reasons = []
5332                 missing_iuse_reasons = []
5333                 for pkg in missing_use:
5334                         use = pkg.use.enabled
5335                         iuse = implicit_iuse.union(re.escape(x) for x in pkg.iuse.all)
5336                         iuse_re = re.compile("^(%s)$" % "|".join(iuse))
5337                         missing_iuse = []
5338                         for x in atom.use.required:
5339                                 if iuse_re.match(x) is None:
5340                                         missing_iuse.append(x)
5341                         mreasons = []
5342                         if missing_iuse:
5343                                 mreasons.append("Missing IUSE: %s" % " ".join(missing_iuse))
5344                                 missing_iuse_reasons.append((pkg, mreasons))
5345                         else:
5346                                 need_enable = sorted(atom.use.enabled.difference(use))
5347                                 need_disable = sorted(atom.use.disabled.intersection(use))
5348                                 if need_enable or need_disable:
5349                                         changes = []
5350                                         changes.extend(colorize("red", "+" + x) \
5351                                                 for x in need_enable)
5352                                         changes.extend(colorize("blue", "-" + x) \
5353                                                 for x in need_disable)
5354                                         mreasons.append("Change USE: %s" % " ".join(changes))
5355                                         missing_use_reasons.append((pkg, mreasons))
5356
5357                 if missing_iuse_reasons and not missing_use_reasons:
5358                         missing_use_reasons = missing_iuse_reasons
5359                 elif missing_use_reasons:
5360                         # Only show the latest version.
5361                         del missing_use_reasons[1:]
5362
5363                 if missing_use_reasons:
5364                         print "\nemerge: there are no ebuilds built with USE flags to satisfy "+green(xinfo)+"."
5365                         print "!!! One of the following packages is required to complete your request:"
5366                         for pkg, mreasons in missing_use_reasons:
5367                                 print "- "+pkg.cpv+" ("+", ".join(mreasons)+")"
5368
5369                 elif masked_packages:
5370                         print "\n!!! " + \
5371                                 colorize("BAD", "All ebuilds that could satisfy ") + \
5372                                 colorize("INFORM", xinfo) + \
5373                                 colorize("BAD", " have been masked.")
5374                         print "!!! One of the following masked packages is required to complete your request:"
5375                         have_eapi_mask = show_masked_packages(masked_packages)
5376                         if have_eapi_mask:
5377                                 print
5378                                 msg = ("The current version of portage supports " + \
5379                                         "EAPI '%s'. You must upgrade to a newer version" + \
5380                                         " of portage before EAPI masked packages can" + \
5381                                         " be installed.") % portage.const.EAPI
5382                                 from textwrap import wrap
5383                                 for line in wrap(msg, 75):
5384                                         print line
5385                         print
5386                         show_mask_docs()
5387                 else:
5388                         print "\nemerge: there are no ebuilds to satisfy "+green(xinfo)+"."
5389
5390                 # Show parent nodes and the argument that pulled them in.
5391                 traversed_nodes = set()
5392                 node = myparent
5393                 msg = []
5394                 while node is not None:
5395                         traversed_nodes.add(node)
5396                         msg.append('(dependency required by "%s" [%s])' % \
5397                                 (colorize('INFORM', str(node.cpv)), node.type_name))
5398                         # When traversing to parents, prefer arguments over packages
5399                         # since arguments are root nodes. Never traverse the same
5400                         # package twice, in order to prevent an infinite loop.
5401                         selected_parent = None
5402                         for parent in self.digraph.parent_nodes(node):
5403                                 if isinstance(parent, DependencyArg):
5404                                         msg.append('(dependency required by "%s" [argument])' % \
5405                                                 (colorize('INFORM', str(parent))))
5406                                         selected_parent = None
5407                                         break
5408                                 if parent not in traversed_nodes:
5409                                         selected_parent = parent
5410                         node = selected_parent
5411                 for line in msg:
5412                         print line
5413
5414                 print
5415
5416         def _select_pkg_highest_available(self, root, atom, onlydeps=False):
5417                 cache_key = (root, atom, onlydeps)
5418                 ret = self._highest_pkg_cache.get(cache_key)
5419                 if ret is not None:
5420                         pkg, existing = ret
5421                         if pkg and not existing:
5422                                 existing = self._slot_pkg_map[root].get(pkg.slot_atom)
5423                                 if existing and existing == pkg:
5424                                         # Update the cache to reflect that the
5425                                         # package has been added to the graph.
5426                                         ret = pkg, pkg
5427                                         self._highest_pkg_cache[cache_key] = ret
5428                         return ret
5429                 ret = self._select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps)
5430                 self._highest_pkg_cache[cache_key] = ret
5431                 return ret
5432
5433         def _select_pkg_highest_available_imp(self, root, atom, onlydeps=False):
5434                 root_config = self.roots[root]
5435                 pkgsettings = self.pkgsettings[root]
5436                 dbs = self._filtered_trees[root]["dbs"]
5437                 vardb = self.roots[root].trees["vartree"].dbapi
5438                 portdb = self.roots[root].trees["porttree"].dbapi
5439                 # List of acceptable packages, ordered by type preference.
5440                 matched_packages = []
5441                 highest_version = None
5442                 if not isinstance(atom, portage.dep.Atom):
5443                         atom = portage.dep.Atom(atom)
5444                 atom_cp = atom.cp
5445                 atom_set = InternalPackageSet(initial_atoms=(atom,))
5446                 existing_node = None
5447                 myeb = None
5448                 usepkgonly = "--usepkgonly" in self.myopts
5449                 empty = "empty" in self.myparams
5450                 selective = "selective" in self.myparams
5451                 reinstall = False
5452                 noreplace = "--noreplace" in self.myopts
5453                 # Behavior of the "selective" parameter depends on
5454                 # whether or not a package matches an argument atom.
5455                 # If an installed package provides an old-style
5456                 # virtual that is no longer provided by an available
5457                 # package, the installed package may match an argument
5458                 # atom even though none of the available packages do.
5459                 # Therefore, "selective" logic does not consider
5460                 # whether or not an installed package matches an
5461                 # argument atom. It only considers whether or not
5462                 # available packages match argument atoms, which is
5463                 # represented by the found_available_arg flag.
5464                 found_available_arg = False
5465                 for find_existing_node in True, False:
5466                         if existing_node:
5467                                 break
5468                         for db, pkg_type, built, installed, db_keys in dbs:
5469                                 if existing_node:
5470                                         break
5471                                 if installed and not find_existing_node:
5472                                         want_reinstall = reinstall or empty or \
5473                                                 (found_available_arg and not selective)
5474                                         if want_reinstall and matched_packages:
5475                                                 continue
5476                                 if hasattr(db, "xmatch"):
5477                                         cpv_list = db.xmatch("match-all", atom)
5478                                 else:
5479                                         cpv_list = db.match(atom)
5480
5481                                 # USE=multislot can make an installed package appear as if
5482                                 # it doesn't satisfy a slot dependency. Rebuilding the ebuild
5483                                 # won't do any good as long as USE=multislot is enabled since
5484                                 # the newly built package still won't have the expected slot.
5485                                 # Therefore, assume that such SLOT dependencies are already
5486                                 # satisfied rather than forcing a rebuild.
5487                                 if installed and not cpv_list and atom.slot:
5488                                         for cpv in db.match(atom.cp):
5489                                                 slot_available = False
5490                                                 for other_db, other_type, other_built, \
5491                                                         other_installed, other_keys in dbs:
5492                                                         try:
5493                                                                 if atom.slot == \
5494                                                                         other_db.aux_get(cpv, ["SLOT"])[0]:
5495                                                                         slot_available = True
5496                                                                         break
5497                                                         except KeyError:
5498                                                                 pass
5499                                                 if not slot_available:
5500                                                         continue
5501                                                 inst_pkg = self._pkg(cpv, "installed",
5502                                                         root_config, installed=installed)
5503                                                 # Remove the slot from the atom and verify that
5504                                                 # the package matches the resulting atom.
5505                                                 atom_without_slot = portage.dep.remove_slot(atom)
5506                                                 if atom.use:
5507                                                         atom_without_slot += str(atom.use)
5508                                                 atom_without_slot = portage.dep.Atom(atom_without_slot)
5509                                                 if portage.match_from_list(
5510                                                         atom_without_slot, [inst_pkg]):
5511                                                         cpv_list = [inst_pkg.cpv]
5512                                                 break
5513
5514                                 if not cpv_list:
5515                                         continue
5516                                 pkg_status = "merge"
5517                                 if installed or onlydeps:
5518                                         pkg_status = "nomerge"
5519                                 # descending order
5520                                 cpv_list.reverse()
5521                                 for cpv in cpv_list:
5522                                         # Make --noreplace take precedence over --newuse.
5523                                         if not installed and noreplace and \
5524                                                 cpv in vardb.match(atom):
5525                                                 # If the installed version is masked, it may
5526                                                 # be necessary to look at lower versions,
5527                                                 # in case there is a visible downgrade.
5528                                                 continue
5529                                         reinstall_for_flags = None
5530                                         cache_key = (pkg_type, root, cpv, pkg_status)
5531                                         calculated_use = True
5532                                         pkg = self._pkg_cache.get(cache_key)
5533                                         if pkg is None:
5534                                                 calculated_use = False
5535                                                 try:
5536                                                         metadata = izip(db_keys, db.aux_get(cpv, db_keys))
5537                                                 except KeyError:
5538                                                         continue
5539                                                 pkg = Package(built=built, cpv=cpv,
5540                                                         installed=installed, metadata=metadata,
5541                                                         onlydeps=onlydeps, root_config=root_config,
5542                                                         type_name=pkg_type)
5543                                                 metadata = pkg.metadata
5544                                                 if not built and ("?" in metadata["LICENSE"] or \
5545                                                         "?" in metadata["PROVIDE"]):
5546                                                         # This is avoided whenever possible because
5547                                                         # it's expensive. It only needs to be done here
5548                                                         # if it has an effect on visibility.
5549                                                         pkgsettings.setcpv(pkg)
5550                                                         metadata["USE"] = pkgsettings["PORTAGE_USE"]
5551                                                         calculated_use = True
5552                                                 self._pkg_cache[pkg] = pkg
5553
5554                                         if not installed or (installed and matched_packages):
5555                                                 # Only enforce visibility on installed packages
5556                                                 # if there is at least one other visible package
5557                                                 # available. By filtering installed masked packages
5558                                                 # here, packages that have been masked since they
5559                                                 # were installed can be automatically downgraded
5560                                                 # to an unmasked version.
5561                                                 try:
5562                                                         if not visible(pkgsettings, pkg):
5563                                                                 continue
5564                                                 except portage.exception.InvalidDependString:
5565                                                         if not installed:
5566                                                                 continue
5567
5568                                                 # Enable upgrade or downgrade to a version
5569                                                 # with visible KEYWORDS when the installed
5570                                                 # version is masked by KEYWORDS, but never
5571                                                 # reinstall the same exact version only due
5572                                                 # to a KEYWORDS mask.
5573                                                 if installed and matched_packages and \
5574                                                         pkgsettings._getMissingKeywords(
5575                                                         pkg.cpv, pkg.metadata):
5576                                                         different_version = None
5577                                                         for avail_pkg in matched_packages:
5578                                                                 if not portage.dep.cpvequal(
5579                                                                         pkg.cpv, avail_pkg.cpv):
5580                                                                         different_version = avail_pkg
5581                                                                         break
5582                                                         if different_version is not None:
5583                                                                 # Only reinstall for KEYWORDS if
5584                                                                 # it's not the same version.
5585                                                                 continue
5586
5587                                         if not pkg.built and not calculated_use:
5588                                                 # This is avoided whenever possible because
5589                                                 # it's expensive.
5590                                                 pkgsettings.setcpv(pkg)
5591                                                 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
5592
5593                                         if pkg.cp != atom.cp:
5594                                                 # A cpv can be returned from dbapi.match() as an
5595                                                 # old-style virtual match even in cases when the
5596                                                 # package does not actually PROVIDE the virtual.
5597                                                 # Filter out any such false matches here.
5598                                                 if not atom_set.findAtomForPackage(pkg):
5599                                                         continue
5600
5601                                         myarg = None
5602                                         if root == self.target_root:
5603                                                 try:
5604                                                         # Ebuild USE must have been calculated prior
5605                                                         # to this point, in case atoms have USE deps.
5606                                                         myarg = self._iter_atoms_for_pkg(pkg).next()
5607                                                 except StopIteration:
5608                                                         pass
5609                                                 except portage.exception.InvalidDependString:
5610                                                         if not installed:
5611                                                                 # masked by corruption
5612                                                                 continue
5613                                         if not installed and myarg:
5614                                                 found_available_arg = True
5615
5616                                         if atom.use and not pkg.built:
5617                                                 use = pkg.use.enabled
5618                                                 if atom.use.enabled.difference(use):
5619                                                         continue
5620                                                 if atom.use.disabled.intersection(use):
5621                                                         continue
5622                                         if pkg.cp == atom_cp:
5623                                                 if highest_version is None:
5624                                                         highest_version = pkg
5625                                                 elif pkg > highest_version:
5626                                                         highest_version = pkg
5627                                         # At this point, we've found the highest visible
5628                                         # match from the current repo. Any lower versions
5629                                         # from this repo are ignored, so this so the loop
5630                                         # will always end with a break statement below
5631                                         # this point.
5632                                         if find_existing_node:
5633                                                 e_pkg = self._slot_pkg_map[root].get(pkg.slot_atom)
5634                                                 if not e_pkg:
5635                                                         break
5636                                                 if portage.dep.match_from_list(atom, [e_pkg]):
5637                                                         if highest_version and \
5638                                                                 e_pkg.cp == atom_cp and \
5639                                                                 e_pkg < highest_version and \
5640                                                                 e_pkg.slot_atom != highest_version.slot_atom:
5641                                                                 # There is a higher version available in a
5642                                                                 # different slot, so this existing node is
5643                                                                 # irrelevant.
5644                                                                 pass
5645                                                         else:
5646                                                                 matched_packages.append(e_pkg)
5647                                                                 existing_node = e_pkg
5648                                                 break
5649                                         # Compare built package to current config and
5650                                         # reject the built package if necessary.
5651                                         if built and not installed and \
5652                                                 ("--newuse" in self.myopts or \
5653                                                 "--reinstall" in self.myopts):
5654                                                 iuses = pkg.iuse.all
5655                                                 old_use = pkg.use.enabled
5656                                                 if myeb:
5657                                                         pkgsettings.setcpv(myeb)
5658                                                 else:
5659                                                         pkgsettings.setcpv(pkg)
5660                                                 now_use = pkgsettings["PORTAGE_USE"].split()
5661                                                 forced_flags = set()
5662                                                 forced_flags.update(pkgsettings.useforce)
5663                                                 forced_flags.update(pkgsettings.usemask)
5664                                                 cur_iuse = iuses
5665                                                 if myeb and not usepkgonly:
5666                                                         cur_iuse = myeb.iuse.all
5667                                                 if self._reinstall_for_flags(forced_flags,
5668                                                         old_use, iuses,
5669                                                         now_use, cur_iuse):
5670                                                         break
5671                                         # Compare current config to installed package
5672                                         # and do not reinstall if possible.
5673                                         if not installed and \
5674                                                 ("--newuse" in self.myopts or \
5675                                                 "--reinstall" in self.myopts) and \
5676                                                 cpv in vardb.match(atom):
5677                                                 pkgsettings.setcpv(pkg)
5678                                                 forced_flags = set()
5679                                                 forced_flags.update(pkgsettings.useforce)
5680                                                 forced_flags.update(pkgsettings.usemask)
5681                                                 old_use = vardb.aux_get(cpv, ["USE"])[0].split()
5682                                                 old_iuse = set(filter_iuse_defaults(
5683                                                         vardb.aux_get(cpv, ["IUSE"])[0].split()))
5684                                                 cur_use = pkgsettings["PORTAGE_USE"].split()
5685                                                 cur_iuse = pkg.iuse.all
5686                                                 reinstall_for_flags = \
5687                                                         self._reinstall_for_flags(
5688                                                         forced_flags, old_use, old_iuse,
5689                                                         cur_use, cur_iuse)
5690                                                 if reinstall_for_flags:
5691                                                         reinstall = True
5692                                         if not built:
5693                                                 myeb = pkg
5694                                         matched_packages.append(pkg)
5695                                         if reinstall_for_flags:
5696                                                 self._reinstall_nodes[pkg] = \
5697                                                         reinstall_for_flags
5698                                         break
5699
5700                 if not matched_packages:
5701                         return None, None
5702
5703                 if "--debug" in self.myopts:
5704                         for pkg in matched_packages:
5705                                 portage.writemsg("%s %s\n" % \
5706                                         ((pkg.type_name + ":").rjust(10), pkg.cpv), noiselevel=-1)
5707
5708                 # Filter out any old-style virtual matches if they are
5709                 # mixed with new-style virtual matches.
5710                 cp = portage.dep_getkey(atom)
5711                 if len(matched_packages) > 1 and \
5712                         "virtual" == portage.catsplit(cp)[0]:
5713                         for pkg in matched_packages:
5714                                 if pkg.cp != cp:
5715                                         continue
5716                                 # Got a new-style virtual, so filter
5717                                 # out any old-style virtuals.
5718                                 matched_packages = [pkg for pkg in matched_packages \
5719                                         if pkg.cp == cp]
5720                                 break
5721
5722                 # If the installed version is in a different slot and it is higher than
5723                 # the highest available visible package, _iter_atoms_for_pkg() may fail
5724                 # to properly match the available package with a corresponding argument
5725                 # atom. Detect this case and correct it here.
5726                 if not selective and len(matched_packages) > 1 and \
5727                         matched_packages[-1].installed and \
5728                         matched_packages[-1].slot_atom != \
5729                         matched_packages[-2].slot_atom and \
5730                         matched_packages[-1] > matched_packages[-2]:
5731                         pkg = matched_packages[-2]
5732                         if pkg.root == self.target_root and \
5733                                 self._set_atoms.findAtomForPackage(pkg):
5734                                 # Select the available package instead
5735                                 # of the installed package.
5736                                 matched_packages.pop()
5737
5738                 if len(matched_packages) > 1:
5739                         bestmatch = portage.best(
5740                                 [pkg.cpv for pkg in matched_packages])
5741                         matched_packages = [pkg for pkg in matched_packages \
5742                                 if portage.dep.cpvequal(pkg.cpv, bestmatch)]
5743
5744                 # ordered by type preference ("ebuild" type is the last resort)
5745                 return  matched_packages[-1], existing_node
5746
5747         def _select_pkg_from_graph(self, root, atom, onlydeps=False):
5748                 """
5749                 Select packages that have already been added to the graph or
5750                 those that are installed and have not been scheduled for
5751                 replacement.
5752                 """
5753                 graph_db = self._graph_trees[root]["porttree"].dbapi
5754                 matches = graph_db.match(atom)
5755                 if not matches:
5756                         return None, None
5757                 cpv = matches[-1] # highest match
5758                 slot_atom = "%s:%s" % (portage.cpv_getkey(cpv),
5759                         graph_db.aux_get(cpv, ["SLOT"])[0])
5760                 e_pkg = self._slot_pkg_map[root].get(slot_atom)
5761                 if e_pkg:
5762                         return e_pkg, e_pkg
5763                 # Since this cpv exists in the graph_db,
5764                 # we must have a cached Package instance.
5765                 cache_key = ("installed", root, cpv, "nomerge")
5766                 return (self._pkg_cache[cache_key], None)
5767
5768         def _complete_graph(self):
5769                 """
5770                 Add any deep dependencies of required sets (args, system, world) that
5771                 have not been pulled into the graph yet. This ensures that the graph
5772                 is consistent such that initially satisfied deep dependencies are not
5773                 broken in the new graph. Initially unsatisfied dependencies are
5774                 irrelevant since we only want to avoid breaking dependencies that are
5775                 intially satisfied.
5776
5777                 Since this method can consume enough time to disturb users, it is
5778                 currently only enabled by the --complete-graph option.
5779                 """
5780                 if "--buildpkgonly" in self.myopts or \
5781                         "recurse" not in self.myparams:
5782                         return 1
5783
5784                 if "complete" not in self.myparams:
5785                         # Skip this to avoid consuming enough time to disturb users.
5786                         return 1
5787
5788                 # Put the depgraph into a mode that causes it to only
5789                 # select packages that have already been added to the
5790                 # graph or those that are installed and have not been
5791                 # scheduled for replacement. Also, toggle the "deep"
5792                 # parameter so that all dependencies are traversed and
5793                 # accounted for.
5794                 self._select_atoms = self._select_atoms_from_graph
5795                 self._select_package = self._select_pkg_from_graph
5796                 already_deep = "deep" in self.myparams
5797                 if not already_deep:
5798                         self.myparams.add("deep")
5799
5800                 for root in self.roots:
5801                         required_set_names = self._required_set_names.copy()
5802                         if root == self.target_root and \
5803                                 (already_deep or "empty" in self.myparams):
5804                                 required_set_names.difference_update(self._sets)
5805                         if not required_set_names and not self._ignored_deps:
5806                                 continue
5807                         root_config = self.roots[root]
5808                         setconfig = root_config.setconfig
5809                         args = []
5810                         # Reuse existing SetArg instances when available.
5811                         for arg in self.digraph.root_nodes():
5812                                 if not isinstance(arg, SetArg):
5813                                         continue
5814                                 if arg.root_config != root_config:
5815                                         continue
5816                                 if arg.name in required_set_names:
5817                                         args.append(arg)
5818                                         required_set_names.remove(arg.name)
5819                         # Create new SetArg instances only when necessary.
5820                         for s in required_set_names:
5821                                 expanded_set = InternalPackageSet(
5822                                         initial_atoms=setconfig.getSetAtoms(s))
5823                                 atom = SETPREFIX + s
5824                                 args.append(SetArg(arg=atom, set=expanded_set,
5825                                         root_config=root_config))
5826                         vardb = root_config.trees["vartree"].dbapi
5827                         for arg in args:
5828                                 for atom in arg.set:
5829                                         self._dep_stack.append(
5830                                                 Dependency(atom=atom, root=root, parent=arg))
5831                         if self._ignored_deps:
5832                                 self._dep_stack.extend(self._ignored_deps)
5833                                 self._ignored_deps = []
5834                         if not self._create_graph(allow_unsatisfied=True):
5835                                 return 0
5836                         # Check the unsatisfied deps to see if any initially satisfied deps
5837                         # will become unsatisfied due to an upgrade. Initially unsatisfied
5838                         # deps are irrelevant since we only want to avoid breaking deps
5839                         # that are initially satisfied.
5840                         while self._unsatisfied_deps:
5841                                 dep = self._unsatisfied_deps.pop()
5842                                 matches = vardb.match_pkgs(dep.atom)
5843                                 if not matches:
5844                                         self._initially_unsatisfied_deps.append(dep)
5845                                         continue
5846                                 # An scheduled installation broke a deep dependency.
5847                                 # Add the installed package to the graph so that it
5848                                 # will be appropriately reported as a slot collision
5849                                 # (possibly solvable via backtracking).
5850                                 pkg = matches[-1] # highest match
5851                                 if not self._add_pkg(pkg, dep):
5852                                         return 0
5853                                 if not self._create_graph(allow_unsatisfied=True):
5854                                         return 0
5855                 return 1
5856
5857         def _pkg(self, cpv, type_name, root_config, installed=False):
5858                 """
5859                 Get a package instance from the cache, or create a new
5860                 one if necessary. Raises KeyError from aux_get if it
5861                 failures for some reason (package does not exist or is
5862                 corrupt).
5863                 """
5864                 operation = "merge"
5865                 if installed:
5866                         operation = "nomerge"
5867                 pkg = self._pkg_cache.get(
5868                         (type_name, root_config.root, cpv, operation))
5869                 if pkg is None:
5870                         tree_type = self.pkg_tree_map[type_name]
5871                         db = root_config.trees[tree_type].dbapi
5872                         db_keys = list(self._trees_orig[root_config.root][
5873                                 tree_type].dbapi._aux_cache_keys)
5874                         metadata = izip(db_keys, db.aux_get(cpv, db_keys))
5875                         pkg = Package(cpv=cpv, metadata=metadata,
5876                                 root_config=root_config, installed=installed)
5877                         if type_name == "ebuild":
5878                                 settings = self.pkgsettings[root_config.root]
5879                                 settings.setcpv(pkg)
5880                                 pkg.metadata["USE"] = settings["PORTAGE_USE"]
5881                         self._pkg_cache[pkg] = pkg
5882                 return pkg
5883
5884         def validate_blockers(self):
5885                 """Remove any blockers from the digraph that do not match any of the
5886                 packages within the graph.  If necessary, create hard deps to ensure
5887                 correct merge order such that mutually blocking packages are never
5888                 installed simultaneously."""
5889
5890                 if "--buildpkgonly" in self.myopts or \
5891                         "--nodeps" in self.myopts:
5892                         return True
5893
5894                 #if "deep" in self.myparams:
5895                 if True:
5896                         # Pull in blockers from all installed packages that haven't already
5897                         # been pulled into the depgraph.  This is not enabled by default
5898                         # due to the performance penalty that is incurred by all the
5899                         # additional dep_check calls that are required.
5900
5901                         dep_keys = ["DEPEND","RDEPEND","PDEPEND"]
5902                         for myroot in self.trees:
5903                                 vardb = self.trees[myroot]["vartree"].dbapi
5904                                 portdb = self.trees[myroot]["porttree"].dbapi
5905                                 pkgsettings = self.pkgsettings[myroot]
5906                                 final_db = self.mydbapi[myroot]
5907
5908                                 blocker_cache = BlockerCache(myroot, vardb)
5909                                 stale_cache = set(blocker_cache)
5910                                 for pkg in vardb:
5911                                         cpv = pkg.cpv
5912                                         stale_cache.discard(cpv)
5913                                         pkg_in_graph = self.digraph.contains(pkg)
5914
5915                                         # Check for masked installed packages. Only warn about
5916                                         # packages that are in the graph in order to avoid warning
5917                                         # about those that will be automatically uninstalled during
5918                                         # the merge process or by --depclean.
5919                                         if pkg in final_db:
5920                                                 if pkg_in_graph and not visible(pkgsettings, pkg):
5921                                                         self._masked_installed.add(pkg)
5922
5923                                         blocker_atoms = None
5924                                         blockers = None
5925                                         if pkg_in_graph:
5926                                                 blockers = []
5927                                                 try:
5928                                                         blockers.extend(
5929                                                                 self._blocker_parents.child_nodes(pkg))
5930                                                 except KeyError:
5931                                                         pass
5932                                                 try:
5933                                                         blockers.extend(
5934                                                                 self._irrelevant_blockers.child_nodes(pkg))
5935                                                 except KeyError:
5936                                                         pass
5937                                         if blockers is not None:
5938                                                 blockers = set(str(blocker.atom) \
5939                                                         for blocker in blockers)
5940
5941                                         # If this node has any blockers, create a "nomerge"
5942                                         # node for it so that they can be enforced.
5943                                         self.spinner.update()
5944                                         blocker_data = blocker_cache.get(cpv)
5945                                         if blocker_data is not None and \
5946                                                 blocker_data.counter != long(pkg.metadata["COUNTER"]):
5947                                                 blocker_data = None
5948
5949                                         # If blocker data from the graph is available, use
5950                                         # it to validate the cache and update the cache if
5951                                         # it seems invalid.
5952                                         if blocker_data is not None and \
5953                                                 blockers is not None:
5954                                                 if not blockers.symmetric_difference(
5955                                                         blocker_data.atoms):
5956                                                         continue
5957                                                 blocker_data = None
5958
5959                                         if blocker_data is None and \
5960                                                 blockers is not None:
5961                                                 # Re-use the blockers from the graph.
5962                                                 blocker_atoms = sorted(blockers)
5963                                                 counter = long(pkg.metadata["COUNTER"])
5964                                                 blocker_data = \
5965                                                         blocker_cache.BlockerData(counter, blocker_atoms)
5966                                                 blocker_cache[pkg.cpv] = blocker_data
5967                                                 continue
5968
5969                                         if blocker_data:
5970                                                 blocker_atoms = blocker_data.atoms
5971                                         else:
5972                                                 # Use aux_get() to trigger FakeVartree global
5973                                                 # updates on *DEPEND when appropriate.
5974                                                 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
5975                                                 # It is crucial to pass in final_db here in order to
5976                                                 # optimize dep_check calls by eliminating atoms via
5977                                                 # dep_wordreduce and dep_eval calls.
5978                                                 try:
5979                                                         portage.dep._dep_check_strict = False
5980                                                         try:
5981                                                                 success, atoms = portage.dep_check(depstr,
5982                                                                         final_db, pkgsettings, myuse=pkg.use.enabled,
5983                                                                         trees=self._graph_trees, myroot=myroot)
5984                                                         except Exception, e:
5985                                                                 if isinstance(e, SystemExit):
5986                                                                         raise
5987                                                                 # This is helpful, for example, if a ValueError
5988                                                                 # is thrown from cpv_expand due to multiple
5989                                                                 # matches (this can happen if an atom lacks a
5990                                                                 # category).
5991                                                                 show_invalid_depstring_notice(
5992                                                                         pkg, depstr, str(e))
5993                                                                 del e
5994                                                                 raise
5995                                                 finally:
5996                                                         portage.dep._dep_check_strict = True
5997                                                 if not success:
5998                                                         replacement_pkg = final_db.match_pkgs(pkg.slot_atom)
5999                                                         if replacement_pkg and \
6000                                                                 replacement_pkg[0].operation == "merge":
6001                                                                 # This package is being replaced anyway, so
6002                                                                 # ignore invalid dependencies so as not to
6003                                                                 # annoy the user too much (otherwise they'd be
6004                                                                 # forced to manually unmerge it first).
6005                                                                 continue
6006                                                         show_invalid_depstring_notice(pkg, depstr, atoms)
6007                                                         return False
6008                                                 blocker_atoms = [myatom for myatom in atoms \
6009                                                         if myatom.startswith("!")]
6010                                                 blocker_atoms.sort()
6011                                                 counter = long(pkg.metadata["COUNTER"])
6012                                                 blocker_cache[cpv] = \
6013                                                         blocker_cache.BlockerData(counter, blocker_atoms)
6014                                         if blocker_atoms:
6015                                                 try:
6016                                                         for atom in blocker_atoms:
6017                                                                 blocker = Blocker(atom=portage.dep.Atom(atom),
6018                                                                         eapi=pkg.metadata["EAPI"], root=myroot)
6019                                                                 self._blocker_parents.add(blocker, pkg)
6020                                                 except portage.exception.InvalidAtom, e:
6021                                                         depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
6022                                                         show_invalid_depstring_notice(
6023                                                                 pkg, depstr, "Invalid Atom: %s" % (e,))
6024                                                         return False
6025                                 for cpv in stale_cache:
6026                                         del blocker_cache[cpv]
6027                                 blocker_cache.flush()
6028                                 del blocker_cache
6029
6030                 # Discard any "uninstall" tasks scheduled by previous calls
6031                 # to this method, since those tasks may not make sense given
6032                 # the current graph state.
6033                 previous_uninstall_tasks = self._blocker_uninstalls.leaf_nodes()
6034                 if previous_uninstall_tasks:
6035                         self._blocker_uninstalls = digraph()
6036                         self.digraph.difference_update(previous_uninstall_tasks)
6037
6038                 for blocker in self._blocker_parents.leaf_nodes():
6039                         self.spinner.update()
6040                         root_config = self.roots[blocker.root]
6041                         virtuals = root_config.settings.getvirtuals()
6042                         myroot = blocker.root
6043                         initial_db = self.trees[myroot]["vartree"].dbapi
6044                         final_db = self.mydbapi[myroot]
6045                         
6046                         provider_virtual = False
6047                         if blocker.cp in virtuals and \
6048                                 not self._have_new_virt(blocker.root, blocker.cp):
6049                                 provider_virtual = True
6050
6051                         if provider_virtual:
6052                                 atoms = []
6053                                 for provider_entry in virtuals[blocker.cp]:
6054                                         provider_cp = \
6055                                                 portage.dep_getkey(provider_entry)
6056                                         atoms.append(blocker.atom.replace(
6057                                                 blocker.cp, provider_cp))
6058                         else:
6059                                 atoms = [blocker.atom]
6060
6061                         blocked_initial = []
6062                         for atom in atoms:
6063                                 blocked_initial.extend(initial_db.match_pkgs(atom))
6064
6065                         blocked_final = []
6066                         for atom in atoms:
6067                                 blocked_final.extend(final_db.match_pkgs(atom))
6068
6069                         if not blocked_initial and not blocked_final:
6070                                 parent_pkgs = self._blocker_parents.parent_nodes(blocker)
6071                                 self._blocker_parents.remove(blocker)
6072                                 # Discard any parents that don't have any more blockers.
6073                                 for pkg in parent_pkgs:
6074                                         self._irrelevant_blockers.add(blocker, pkg)
6075                                         if not self._blocker_parents.child_nodes(pkg):
6076                                                 self._blocker_parents.remove(pkg)
6077                                 continue
6078                         for parent in self._blocker_parents.parent_nodes(blocker):
6079                                 unresolved_blocks = False
6080                                 depends_on_order = set()
6081                                 for pkg in blocked_initial:
6082                                         if pkg.slot_atom == parent.slot_atom:
6083                                                 # TODO: Support blocks within slots in cases where it
6084                                                 # might make sense.  For example, a new version might
6085                                                 # require that the old version be uninstalled at build
6086                                                 # time.
6087                                                 continue
6088                                         if parent.installed:
6089                                                 # Two currently installed packages conflict with
6090                                                 # eachother. Ignore this case since the damage
6091                                                 # is already done and this would be likely to
6092                                                 # confuse users if displayed like a normal blocker.
6093                                                 continue
6094                                         if parent.operation == "merge":
6095                                                 # Maybe the blocked package can be replaced or simply
6096                                                 # unmerged to resolve this block.
6097                                                 depends_on_order.add((pkg, parent))
6098                                                 continue
6099                                         # None of the above blocker resolutions techniques apply,
6100                                         # so apparently this one is unresolvable.
6101                                         unresolved_blocks = True
6102                                 for pkg in blocked_final:
6103                                         if pkg.slot_atom == parent.slot_atom:
6104                                                 # TODO: Support blocks within slots.
6105                                                 continue
6106                                         if parent.operation == "nomerge" and \
6107                                                 pkg.operation == "nomerge":
6108                                                 # This blocker will be handled the next time that a
6109                                                 # merge of either package is triggered.
6110                                                 continue
6111
6112                                         # Maybe the blocking package can be
6113                                         # unmerged to resolve this block.
6114                                         if parent.operation == "merge" and pkg.installed:
6115                                                 depends_on_order.add((pkg, parent))
6116                                                 continue
6117                                         elif parent.operation == "nomerge":
6118                                                 depends_on_order.add((parent, pkg))
6119                                                 continue
6120                                         # None of the above blocker resolutions techniques apply,
6121                                         # so apparently this one is unresolvable.
6122                                         unresolved_blocks = True
6123
6124                                 # Make sure we don't unmerge any package that have been pulled
6125                                 # into the graph.
6126                                 if not unresolved_blocks and depends_on_order:
6127                                         for inst_pkg, inst_task in depends_on_order:
6128                                                 if self.digraph.contains(inst_pkg) and \
6129                                                         self.digraph.parent_nodes(inst_pkg):
6130                                                         unresolved_blocks = True
6131                                                         break
6132
6133                                 if not unresolved_blocks and depends_on_order:
6134                                         for inst_pkg, inst_task in depends_on_order:
6135                                                 uninst_task = Package(built=inst_pkg.built,
6136                                                         cpv=inst_pkg.cpv, installed=inst_pkg.installed,
6137                                                         metadata=inst_pkg.metadata,
6138                                                         operation="uninstall",
6139                                                         root_config=inst_pkg.root_config,
6140                                                         type_name=inst_pkg.type_name)
6141                                                 self._pkg_cache[uninst_task] = uninst_task
6142                                                 # Enforce correct merge order with a hard dep.
6143                                                 self.digraph.addnode(uninst_task, inst_task,
6144                                                         priority=BlockerDepPriority.instance)
6145                                                 # Count references to this blocker so that it can be
6146                                                 # invalidated after nodes referencing it have been
6147                                                 # merged.
6148                                                 self._blocker_uninstalls.addnode(uninst_task, blocker)
6149                                 if not unresolved_blocks and not depends_on_order:
6150                                         self._irrelevant_blockers.add(blocker, parent)
6151                                         self._blocker_parents.remove_edge(blocker, parent)
6152                                         if not self._blocker_parents.parent_nodes(blocker):
6153                                                 self._blocker_parents.remove(blocker)
6154                                         if not self._blocker_parents.child_nodes(parent):
6155                                                 self._blocker_parents.remove(parent)
6156                                 if unresolved_blocks:
6157                                         self._unsolvable_blockers.add(blocker, parent)
6158
6159                 return True
6160
6161         def _accept_blocker_conflicts(self):
6162                 acceptable = False
6163                 for x in ("--buildpkgonly", "--fetchonly",
6164                         "--fetch-all-uri", "--nodeps", "--pretend"):
6165                         if x in self.myopts:
6166                                 acceptable = True
6167                                 break
6168                 return acceptable
6169
6170         def _merge_order_bias(self, mygraph):
6171                 """Order nodes from highest to lowest overall reference count for
6172                 optimal leaf node selection."""
6173                 node_info = {}
6174                 for node in mygraph.order:
6175                         node_info[node] = len(mygraph.parent_nodes(node))
6176                 def cmp_merge_preference(node1, node2):
6177                         return node_info[node2] - node_info[node1]
6178                 mygraph.order.sort(cmp_merge_preference)
6179
6180         def altlist(self, reversed=False):
6181
6182                 while self._serialized_tasks_cache is None:
6183                         self._resolve_conflicts()
6184                         try:
6185                                 self._serialized_tasks_cache, self._scheduler_graph = \
6186                                         self._serialize_tasks()
6187                         except self._serialize_tasks_retry:
6188                                 pass
6189
6190                 retlist = self._serialized_tasks_cache[:]
6191                 if reversed:
6192                         retlist.reverse()
6193                 return retlist
6194
6195         def schedulerGraph(self):
6196                 """
6197                 The scheduler graph is identical to the normal one except that
6198                 uninstall edges are reversed in specific cases that require
6199                 conflicting packages to be temporarily installed simultaneously.
6200                 This is intended for use by the Scheduler in it's parallelization
6201                 logic. It ensures that temporary simultaneous installation of
6202                 conflicting packages is avoided when appropriate (especially for
6203                 !!atom blockers), but allowed in specific cases that require it.
6204
6205                 Note that this method calls break_refs() which alters the state of
6206                 internal Package instances such that this depgraph instance should
6207                 not be used to perform any more calculations.
6208                 """
6209                 if self._scheduler_graph is None:
6210                         self.altlist()
6211                 self.break_refs(self._scheduler_graph.order)
6212                 return self._scheduler_graph
6213
6214         def break_refs(self, nodes):
6215                 """
6216                 Take a mergelist like that returned from self.altlist() and
6217                 break any references that lead back to the depgraph. This is
6218                 useful if you want to hold references to packages without
6219                 also holding the depgraph on the heap.
6220                 """
6221                 for node in nodes:
6222                         if hasattr(node, "root_config"):
6223                                 # The FakeVartree references the _package_cache which
6224                                 # references the depgraph. So that Package instances don't
6225                                 # hold the depgraph and FakeVartree on the heap, replace
6226                                 # the RootConfig that references the FakeVartree with the
6227                                 # original RootConfig instance which references the actual
6228                                 # vartree.
6229                                 node.root_config = \
6230                                         self._trees_orig[node.root_config.root]["root_config"]
6231
6232         def _resolve_conflicts(self):
6233                 if not self._complete_graph():
6234                         raise self._unknown_internal_error()
6235
6236                 if not self.validate_blockers():
6237                         raise self._unknown_internal_error()
6238
6239         def _serialize_tasks(self):
6240                 scheduler_graph = self.digraph.copy()
6241                 mygraph=self.digraph.copy()
6242                 # Prune "nomerge" root nodes if nothing depends on them, since
6243                 # otherwise they slow down merge order calculation. Don't remove
6244                 # non-root nodes since they help optimize merge order in some cases
6245                 # such as revdep-rebuild.
6246                 removed_nodes = set()
6247                 while True:
6248                         for node in mygraph.root_nodes():
6249                                 if not isinstance(node, Package) or \
6250                                         node.installed or node.onlydeps:
6251                                         removed_nodes.add(node)
6252                         if removed_nodes:
6253                                 self.spinner.update()
6254                                 mygraph.difference_update(removed_nodes)
6255                         if not removed_nodes:
6256                                 break
6257                         removed_nodes.clear()
6258                 self._merge_order_bias(mygraph)
6259                 def cmp_circular_bias(n1, n2):
6260                         """
6261                         RDEPEND is stronger than PDEPEND and this function
6262                         measures such a strength bias within a circular
6263                         dependency relationship.
6264                         """
6265                         n1_n2_medium = n2 in mygraph.child_nodes(n1,
6266                                 ignore_priority=DepPriority.MEDIUM_SOFT)
6267                         n2_n1_medium = n1 in mygraph.child_nodes(n2,
6268                                 ignore_priority=DepPriority.MEDIUM_SOFT)
6269                         if n1_n2_medium == n2_n1_medium:
6270                                 return 0
6271                         elif n1_n2_medium:
6272                                 return 1
6273                         return -1
6274                 myblocker_uninstalls = self._blocker_uninstalls.copy()
6275                 retlist=[]
6276                 # Contains uninstall tasks that have been scheduled to
6277                 # occur after overlapping blockers have been installed.
6278                 scheduled_uninstalls = set()
6279                 # Contains any Uninstall tasks that have been ignored
6280                 # in order to avoid the circular deps code path. These
6281                 # correspond to blocker conflicts that could not be
6282                 # resolved.
6283                 ignored_uninstall_tasks = set()
6284                 have_uninstall_task = False
6285                 complete = "complete" in self.myparams
6286                 myblocker_parents = self._blocker_parents.copy()
6287                 asap_nodes = []
6288
6289                 def get_nodes(**kwargs):
6290                         """
6291                         Returns leaf nodes excluding Uninstall instances
6292                         since those should be executed as late as possible.
6293                         """
6294                         return [node for node in mygraph.leaf_nodes(**kwargs) \
6295                                 if isinstance(node, Package) and \
6296                                         (node.operation != "uninstall" or \
6297                                         node in scheduled_uninstalls)]
6298
6299                 # sys-apps/portage needs special treatment if ROOT="/"
6300                 running_root = self._running_root.root
6301                 from portage.const import PORTAGE_PACKAGE_ATOM
6302                 runtime_deps = InternalPackageSet(
6303                         initial_atoms=[PORTAGE_PACKAGE_ATOM])
6304                 running_portage = self.trees[running_root]["vartree"].dbapi.match_pkgs(
6305                         PORTAGE_PACKAGE_ATOM)
6306                 replacement_portage = self.mydbapi[running_root].match_pkgs(
6307                         PORTAGE_PACKAGE_ATOM)
6308
6309                 if running_portage:
6310                         running_portage = running_portage[0]
6311                 else:
6312                         running_portage = None
6313
6314                 if replacement_portage:
6315                         replacement_portage = replacement_portage[0]
6316                 else:
6317                         replacement_portage = None
6318
6319                 if replacement_portage == running_portage:
6320                         replacement_portage = None
6321
6322                 if replacement_portage is not None:
6323                         # update from running_portage to replacement_portage asap
6324                         asap_nodes.append(replacement_portage)
6325
6326                 if running_portage is not None:
6327                         try:
6328                                 portage_rdepend = self._select_atoms_highest_available(
6329                                         running_root, running_portage.metadata["RDEPEND"],
6330                                         myuse=running_portage.use.enabled,
6331                                         parent=running_portage, strict=False)
6332                         except portage.exception.InvalidDependString, e:
6333                                 portage.writemsg("!!! Invalid RDEPEND in " + \
6334                                         "'%svar/db/pkg/%s/RDEPEND': %s\n" % \
6335                                         (running_root, running_portage.cpv, e), noiselevel=-1)
6336                                 del e
6337                                 portage_rdepend = []
6338                         runtime_deps.update(atom for atom in portage_rdepend \
6339                                 if not atom.startswith("!"))
6340
6341                 ignore_priority_soft_range = [None]
6342                 ignore_priority_soft_range.extend(
6343                         xrange(DepPriority.MIN, DepPriority.MEDIUM_SOFT + 1))
6344                 tree_mode = "--tree" in self.myopts
6345                 # Tracks whether or not the current iteration should prefer asap_nodes
6346                 # if available.  This is set to False when the previous iteration
6347                 # failed to select any nodes.  It is reset whenever nodes are
6348                 # successfully selected.
6349                 prefer_asap = True
6350
6351                 # By default, try to avoid selecting root nodes whenever possible. This
6352                 # helps ensure that the maximimum possible number of soft dependencies
6353                 # have been removed from the graph before their parent nodes have
6354                 # selected. This is especially important when those dependencies are
6355                 # going to be rebuilt by revdep-rebuild or `emerge -e system` after the
6356                 # CHOST has been changed (like when building a stage3 from a stage2).
6357                 accept_root_node = False
6358
6359                 # State of prefer_asap and accept_root_node flags for successive
6360                 # iterations that loosen the criteria for node selection.
6361                 #
6362                 # iteration   prefer_asap   accept_root_node
6363                 # 1           True          False
6364                 # 2           False         False
6365                 # 3           False         True
6366                 #
6367                 # If no nodes are selected on the 3rd iteration, it is due to
6368                 # unresolved blockers or circular dependencies.
6369
6370                 while not mygraph.empty():
6371                         self.spinner.update()
6372                         selected_nodes = None
6373                         ignore_priority = None
6374                         if prefer_asap and asap_nodes:
6375                                 """ASAP nodes are merged before their soft deps."""
6376                                 asap_nodes = [node for node in asap_nodes \
6377                                         if mygraph.contains(node)]
6378                                 for node in asap_nodes:
6379                                         if not mygraph.child_nodes(node,
6380                                                 ignore_priority=DepPriority.SOFT):
6381                                                 selected_nodes = [node]
6382                                                 asap_nodes.remove(node)
6383                                                 break
6384                         if not selected_nodes and \
6385                                 not (prefer_asap and asap_nodes):
6386                                 for ignore_priority in ignore_priority_soft_range:
6387                                         nodes = get_nodes(ignore_priority=ignore_priority)
6388                                         if nodes:
6389                                                 break
6390                                 if nodes:
6391                                         if ignore_priority is None and not tree_mode:
6392                                                 # Greedily pop all of these nodes since no relationship
6393                                                 # has been ignored.  This optimization destroys --tree
6394                                                 # output, so it's disabled in reversed mode. If there
6395                                                 # is a mix of merge and uninstall nodes, save the
6396                                                 # uninstall nodes from later since sometimes a merge
6397                                                 # node will render an install node unnecessary, and
6398                                                 # we want to avoid doing a separate uninstall task in
6399                                                 # that case.
6400                                                 merge_nodes = [node for node in nodes \
6401                                                         if node.operation == "merge"]
6402                                                 if merge_nodes:
6403                                                         selected_nodes = merge_nodes
6404                                                 else:
6405                                                         selected_nodes = nodes
6406                                         else:
6407                                                 # For optimal merge order:
6408                                                 #  * Only pop one node.
6409                                                 #  * Removing a root node (node without a parent)
6410                                                 #    will not produce a leaf node, so avoid it.
6411                                                 for node in nodes:
6412                                                         if mygraph.parent_nodes(node):
6413                                                                 # found a non-root node
6414                                                                 selected_nodes = [node]
6415                                                                 break
6416                                                 if not selected_nodes and \
6417                                                         (accept_root_node or ignore_priority is None):
6418                                                         # settle for a root node
6419                                                         selected_nodes = [nodes[0]]
6420
6421                         if not selected_nodes:
6422                                 nodes = get_nodes(ignore_priority=DepPriority.MEDIUM)
6423                                 if nodes:
6424                                         """Recursively gather a group of nodes that RDEPEND on
6425                                         eachother.  This ensures that they are merged as a group
6426                                         and get their RDEPENDs satisfied as soon as possible."""
6427                                         def gather_deps(ignore_priority,
6428                                                 mergeable_nodes, selected_nodes, node):
6429                                                 if node in selected_nodes:
6430                                                         return True
6431                                                 if node not in mergeable_nodes:
6432                                                         return False
6433                                                 if node == replacement_portage and \
6434                                                         mygraph.child_nodes(node,
6435                                                         ignore_priority=DepPriority.MEDIUM_SOFT):
6436                                                         # Make sure that portage always has all of it's
6437                                                         # RDEPENDs installed first.
6438                                                         return False
6439                                                 selected_nodes.add(node)
6440                                                 for child in mygraph.child_nodes(node,
6441                                                         ignore_priority=ignore_priority):
6442                                                         if not gather_deps(ignore_priority,
6443                                                                 mergeable_nodes, selected_nodes, child):
6444                                                                 return False
6445                                                 return True
6446                                         mergeable_nodes = set(nodes)
6447                                         if prefer_asap and asap_nodes:
6448                                                 nodes = asap_nodes
6449                                         for ignore_priority in xrange(DepPriority.SOFT,
6450                                                 DepPriority.MEDIUM_SOFT + 1):
6451                                                 for node in nodes:
6452                                                         if nodes is not asap_nodes and \
6453                                                                 not accept_root_node and \
6454                                                                 not mygraph.parent_nodes(node):
6455                                                                 continue
6456                                                         selected_nodes = set()
6457                                                         if gather_deps(ignore_priority,
6458                                                                 mergeable_nodes, selected_nodes, node):
6459                                                                 break
6460                                                         else:
6461                                                                 selected_nodes = None
6462                                                 if selected_nodes:
6463                                                         break
6464
6465                                         # If any nodes have been selected here, it's always
6466                                         # possible that anything up to a MEDIUM_SOFT priority
6467                                         # relationship has been ignored. This state is recorded
6468                                         # in ignore_priority so that relevant nodes will be
6469                                         # added to asap_nodes when appropriate.
6470                                         if selected_nodes:
6471                                                 ignore_priority = DepPriority.MEDIUM_SOFT
6472
6473                                         if prefer_asap and asap_nodes and not selected_nodes:
6474                                                 # We failed to find any asap nodes to merge, so ignore
6475                                                 # them for the next iteration.
6476                                                 prefer_asap = False
6477                                                 continue
6478
6479                                         if not selected_nodes and not accept_root_node:
6480                                                 # Maybe there are only root nodes left, so accept them
6481                                                 # for the next iteration.
6482                                                 accept_root_node = True
6483                                                 continue
6484
6485                         if selected_nodes and ignore_priority > DepPriority.SOFT:
6486                                 # Try to merge ignored medium deps as soon as possible.
6487                                 for node in selected_nodes:
6488                                         children = set(mygraph.child_nodes(node))
6489                                         soft = children.difference(
6490                                                 mygraph.child_nodes(node,
6491                                                 ignore_priority=DepPriority.SOFT))
6492                                         medium_soft = children.difference(
6493                                                 mygraph.child_nodes(node,
6494                                                 ignore_priority=DepPriority.MEDIUM_SOFT))
6495                                         medium_soft.difference_update(soft)
6496                                         for child in medium_soft:
6497                                                 if child in selected_nodes:
6498                                                         continue
6499                                                 if child in asap_nodes:
6500                                                         continue
6501                                                 asap_nodes.append(child)
6502
6503                         if selected_nodes and len(selected_nodes) > 1:
6504                                 if not isinstance(selected_nodes, list):
6505                                         selected_nodes = list(selected_nodes)
6506                                 selected_nodes.sort(cmp_circular_bias)
6507
6508                         if not selected_nodes and not myblocker_uninstalls.is_empty():
6509                                 # An Uninstall task needs to be executed in order to
6510                                 # avoid conflict if possible.
6511                                 min_parent_deps = None
6512                                 uninst_task = None
6513                                 for task in myblocker_uninstalls.leaf_nodes():
6514                                         # Do some sanity checks so that system or world packages
6515                                         # don't get uninstalled inappropriately here (only really
6516                                         # necessary when --complete-graph has not been enabled).
6517
6518                                         if task in ignored_uninstall_tasks:
6519                                                 continue
6520
6521                                         if task in scheduled_uninstalls:
6522                                                 # It's been scheduled but it hasn't
6523                                                 # been executed yet due to dependence
6524                                                 # on installation of blocking packages.
6525                                                 continue
6526
6527                                         root_config = self.roots[task.root]
6528                                         inst_pkg = self._pkg_cache[
6529                                                 ("installed", task.root, task.cpv, "nomerge")]
6530
6531                                         if self.digraph.contains(inst_pkg):
6532                                                 continue
6533
6534                                         forbid_overlap = False
6535                                         heuristic_overlap = False
6536                                         for blocker in myblocker_uninstalls.parent_nodes(task):
6537                                                 if blocker.eapi in ("0", "1"):
6538                                                         heuristic_overlap = True
6539                                                 elif blocker.atom.blocker.overlap.forbid:
6540                                                         forbid_overlap = True
6541                                                         break
6542                                         if forbid_overlap and running_root == task.root:
6543                                                 continue
6544
6545                                         if heuristic_overlap and running_root == task.root:
6546                                                 # Never uninstall sys-apps/portage or it's essential
6547                                                 # dependencies, except through replacement.
6548                                                 try:
6549                                                         runtime_dep_atoms = \
6550                                                                 list(runtime_deps.iterAtomsForPackage(task))
6551                                                 except portage.exception.InvalidDependString, e:
6552                                                         portage.writemsg("!!! Invalid PROVIDE in " + \
6553                                                                 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
6554                                                                 (task.root, task.cpv, e), noiselevel=-1)
6555                                                         del e
6556                                                         continue
6557
6558                                                 # Don't uninstall a runtime dep if it appears
6559                                                 # to be the only suitable one installed.
6560                                                 skip = False
6561                                                 vardb = root_config.trees["vartree"].dbapi
6562                                                 for atom in runtime_dep_atoms:
6563                                                         other_version = None
6564                                                         for pkg in vardb.match_pkgs(atom):
6565                                                                 if pkg.cpv == task.cpv and \
6566                                                                         pkg.metadata["COUNTER"] == \
6567                                                                         task.metadata["COUNTER"]:
6568                                                                         continue
6569                                                                 other_version = pkg
6570                                                                 break
6571                                                         if other_version is None:
6572                                                                 skip = True
6573                                                                 break
6574                                                 if skip:
6575                                                         continue
6576
6577                                                 # For packages in the system set, don't take
6578                                                 # any chances. If the conflict can't be resolved
6579                                                 # by a normal replacement operation then abort.
6580                                                 skip = False
6581                                                 try:
6582                                                         for atom in root_config.sets[
6583                                                                 "system"].iterAtomsForPackage(task):
6584                                                                 skip = True
6585                                                                 break
6586                                                 except portage.exception.InvalidDependString, e:
6587                                                         portage.writemsg("!!! Invalid PROVIDE in " + \
6588                                                                 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
6589                                                                 (task.root, task.cpv, e), noiselevel=-1)
6590                                                         del e
6591                                                         skip = True
6592                                                 if skip:
6593                                                         continue
6594
6595                                         # Note that the world check isn't always
6596                                         # necessary since self._complete_graph() will
6597                                         # add all packages from the system and world sets to the
6598                                         # graph. This just allows unresolved conflicts to be
6599                                         # detected as early as possible, which makes it possible
6600                                         # to avoid calling self._complete_graph() when it is
6601                                         # unnecessary due to blockers triggering an abortion.
6602                                         if not complete:
6603                                                 # For packages in the world set, go ahead an uninstall
6604                                                 # when necessary, as long as the atom will be satisfied
6605                                                 # in the final state.
6606                                                 graph_db = self.mydbapi[task.root]
6607                                                 skip = False
6608                                                 try:
6609                                                         for atom in root_config.sets[
6610                                                                 "world"].iterAtomsForPackage(task):
6611                                                                 satisfied = False
6612                                                                 for pkg in graph_db.match_pkgs(atom):
6613                                                                         if pkg == inst_pkg:
6614                                                                                 continue
6615                                                                         satisfied = True
6616                                                                         break
6617                                                                 if not satisfied:
6618                                                                         skip = True
6619                                                                         break
6620                                                 except portage.exception.InvalidDependString, e:
6621                                                         portage.writemsg("!!! Invalid PROVIDE in " + \
6622                                                                 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
6623                                                                 (task.root, task.cpv, e), noiselevel=-1)
6624                                                         del e
6625                                                         skip = True
6626                                                 if skip:
6627                                                         continue
6628
6629                                         # Check the deps of parent nodes to ensure that
6630                                         # the chosen task produces a leaf node. Maybe
6631                                         # this can be optimized some more to make the
6632                                         # best possible choice, but the current algorithm
6633                                         # is simple and should be near optimal for most
6634                                         # common cases.
6635                                         parent_deps = set()
6636                                         for parent in mygraph.parent_nodes(task):
6637                                                 parent_deps.update(mygraph.child_nodes(parent,
6638                                                         ignore_priority=DepPriority.MEDIUM_SOFT))
6639                                         parent_deps.remove(task)
6640                                         if min_parent_deps is None or \
6641                                                 len(parent_deps) < min_parent_deps:
6642                                                 min_parent_deps = len(parent_deps)
6643                                                 uninst_task = task
6644
6645                                 if uninst_task is not None:
6646                                         # The uninstall is performed only after blocking
6647                                         # packages have been merged on top of it. File
6648                                         # collisions between blocking packages are detected
6649                                         # and removed from the list of files to be uninstalled.
6650                                         scheduled_uninstalls.add(uninst_task)
6651                                         parent_nodes = mygraph.parent_nodes(uninst_task)
6652
6653                                         # Reverse the parent -> uninstall edges since we want
6654                                         # to do the uninstall after blocking packages have
6655                                         # been merged on top of it.
6656                                         mygraph.remove(uninst_task)
6657                                         for blocked_pkg in parent_nodes:
6658                                                 mygraph.add(blocked_pkg, uninst_task,
6659                                                         priority=BlockerDepPriority.instance)
6660                                                 scheduler_graph.remove_edge(uninst_task, blocked_pkg)
6661                                                 scheduler_graph.add(blocked_pkg, uninst_task,
6662                                                         priority=BlockerDepPriority.instance)
6663
6664                                 else:
6665                                         # None of the Uninstall tasks are acceptable, so
6666                                         # the corresponding blockers are unresolvable.
6667                                         # We need to drop an Uninstall task here in order
6668                                         # to avoid the circular deps code path, but the
6669                                         # blocker will still be counted as an unresolved
6670                                         # conflict.
6671                                         for node in myblocker_uninstalls.leaf_nodes():
6672                                                 try:
6673                                                         mygraph.remove(node)
6674                                                 except KeyError:
6675                                                         pass
6676                                                 else:
6677                                                         ignored_uninstall_tasks.add(node)
6678                                                         break
6679
6680                                 # After dropping an Uninstall task, reset
6681                                 # the state variables for leaf node selection and
6682                                 # continue trying to select leaf nodes.
6683                                 prefer_asap = True
6684                                 accept_root_node = False
6685                                 continue
6686
6687                         if not selected_nodes:
6688                                 self._circular_deps_for_display = mygraph
6689                                 raise self._unknown_internal_error()
6690
6691                         # At this point, we've succeeded in selecting one or more nodes, so
6692                         # it's now safe to reset the prefer_asap and accept_root_node flags
6693                         # to their default states.
6694                         prefer_asap = True
6695                         accept_root_node = False
6696
6697                         mygraph.difference_update(selected_nodes)
6698
6699                         for node in selected_nodes:
6700                                 if isinstance(node, Package) and \
6701                                         node.operation == "nomerge":
6702                                         continue
6703
6704                                 # Handle interactions between blockers
6705                                 # and uninstallation tasks.
6706                                 solved_blockers = set()
6707                                 uninst_task = None
6708                                 if isinstance(node, Package) and \
6709                                         "uninstall" == node.operation:
6710                                         have_uninstall_task = True
6711                                         uninst_task = node
6712                                 else:
6713                                         vardb = self.trees[node.root]["vartree"].dbapi
6714                                         previous_cpv = vardb.match(node.slot_atom)
6715                                         if previous_cpv:
6716                                                 # The package will be replaced by this one, so remove
6717                                                 # the corresponding Uninstall task if necessary.
6718                                                 previous_cpv = previous_cpv[0]
6719                                                 uninst_task = \
6720                                                         ("installed", node.root, previous_cpv, "uninstall")
6721                                                 try:
6722                                                         mygraph.remove(uninst_task)
6723                                                 except KeyError:
6724                                                         pass
6725
6726                                 if uninst_task is not None and \
6727                                         uninst_task not in ignored_uninstall_tasks and \
6728                                         myblocker_uninstalls.contains(uninst_task):
6729                                         blocker_nodes = myblocker_uninstalls.parent_nodes(uninst_task)
6730                                         myblocker_uninstalls.remove(uninst_task)
6731                                         # Discard any blockers that this Uninstall solves.
6732                                         for blocker in blocker_nodes:
6733                                                 if not myblocker_uninstalls.child_nodes(blocker):
6734                                                         myblocker_uninstalls.remove(blocker)
6735                                                         solved_blockers.add(blocker)
6736
6737                                 retlist.append(node)
6738
6739                                 if (isinstance(node, Package) and \
6740                                         "uninstall" == node.operation) or \
6741                                         (uninst_task is not None and \
6742                                         uninst_task in scheduled_uninstalls):
6743                                         # Include satisfied blockers in the merge list
6744                                         # since the user might be interested and also
6745                                         # it serves as an indicator that blocking packages
6746                                         # will be temporarily installed simultaneously.
6747                                         for blocker in solved_blockers:
6748                                                 retlist.append(Blocker(atom=blocker.atom,
6749                                                         root=blocker.root, eapi=blocker.eapi,
6750                                                         satisfied=True))
6751
6752                 unsolvable_blockers = set(self._unsolvable_blockers.leaf_nodes())
6753                 for node in myblocker_uninstalls.root_nodes():
6754                         unsolvable_blockers.add(node)
6755
6756                 for blocker in unsolvable_blockers:
6757                         retlist.append(blocker)
6758
6759                 # If any Uninstall tasks need to be executed in order
6760                 # to avoid a conflict, complete the graph with any
6761                 # dependencies that may have been initially
6762                 # neglected (to ensure that unsafe Uninstall tasks
6763                 # are properly identified and blocked from execution).
6764                 if have_uninstall_task and \
6765                         not complete and \
6766                         not unsolvable_blockers:
6767                         self.myparams.add("complete")
6768                         raise self._serialize_tasks_retry("")
6769
6770                 if unsolvable_blockers and \
6771                         not self._accept_blocker_conflicts():
6772                         self._unsatisfied_blockers_for_display = unsolvable_blockers
6773                         self._serialized_tasks_cache = retlist[:]
6774                         self._scheduler_graph = scheduler_graph
6775                         raise self._unknown_internal_error()
6776
6777                 if self._slot_collision_info and \
6778                         not self._accept_blocker_conflicts():
6779                         self._serialized_tasks_cache = retlist[:]
6780                         self._scheduler_graph = scheduler_graph
6781                         raise self._unknown_internal_error()
6782
6783                 return retlist, scheduler_graph
6784
6785         def _show_circular_deps(self, mygraph):
6786                 # No leaf nodes are available, so we have a circular
6787                 # dependency panic situation.  Reduce the noise level to a
6788                 # minimum via repeated elimination of root nodes since they
6789                 # have no parents and thus can not be part of a cycle.
6790                 while True:
6791                         root_nodes = mygraph.root_nodes(
6792                                 ignore_priority=DepPriority.MEDIUM_SOFT)
6793                         if not root_nodes:
6794                                 break
6795                         mygraph.difference_update(root_nodes)
6796                 # Display the USE flags that are enabled on nodes that are part
6797                 # of dependency cycles in case that helps the user decide to
6798                 # disable some of them.
6799                 display_order = []
6800                 tempgraph = mygraph.copy()
6801                 while not tempgraph.empty():
6802                         nodes = tempgraph.leaf_nodes()
6803                         if not nodes:
6804                                 node = tempgraph.order[0]
6805                         else:
6806                                 node = nodes[0]
6807                         display_order.append(node)
6808                         tempgraph.remove(node)
6809                 display_order.reverse()
6810                 self.myopts.pop("--quiet", None)
6811                 self.myopts.pop("--verbose", None)
6812                 self.myopts["--tree"] = True
6813                 portage.writemsg("\n\n", noiselevel=-1)
6814                 self.display(display_order)
6815                 prefix = colorize("BAD", " * ")
6816                 portage.writemsg("\n", noiselevel=-1)
6817                 portage.writemsg(prefix + "Error: circular dependencies:\n",
6818                         noiselevel=-1)
6819                 portage.writemsg("\n", noiselevel=-1)
6820                 mygraph.debug_print()
6821                 portage.writemsg("\n", noiselevel=-1)
6822                 portage.writemsg(prefix + "Note that circular dependencies " + \
6823                         "can often be avoided by temporarily\n", noiselevel=-1)
6824                 portage.writemsg(prefix + "disabling USE flags that trigger " + \
6825                         "optional dependencies.\n", noiselevel=-1)
6826
6827         def _show_merge_list(self):
6828                 if self._serialized_tasks_cache is not None and \
6829                         not (self._displayed_list and \
6830                         (self._displayed_list == self._serialized_tasks_cache or \
6831                         self._displayed_list == \
6832                                 list(reversed(self._serialized_tasks_cache)))):
6833                         display_list = self._serialized_tasks_cache[:]
6834                         if "--tree" in self.myopts:
6835                                 display_list.reverse()
6836                         self.display(display_list)
6837
6838         def _show_unsatisfied_blockers(self, blockers):
6839                 self._show_merge_list()
6840                 msg = "Error: The above package list contains " + \
6841                         "packages which cannot be installed " + \
6842                         "at the same time on the same system."
6843                 prefix = colorize("BAD", " * ")
6844                 from textwrap import wrap
6845                 portage.writemsg("\n", noiselevel=-1)
6846                 for line in wrap(msg, 70):
6847                         portage.writemsg(prefix + line + "\n", noiselevel=-1)
6848                 if "--quiet" not in self.myopts:
6849                         show_blocker_docs_link()
6850
6851         def display(self, mylist, favorites=[], verbosity=None):
6852
6853                 # This is used to prevent display_problems() from
6854                 # redundantly displaying this exact same merge list
6855                 # again via _show_merge_list().
6856                 self._displayed_list = mylist
6857
6858                 if verbosity is None:
6859                         verbosity = ("--quiet" in self.myopts and 1 or \
6860                                 "--verbose" in self.myopts and 3 or 2)
6861                 favorites_set = InternalPackageSet(favorites)
6862                 oneshot = "--oneshot" in self.myopts or \
6863                         "--onlydeps" in self.myopts
6864                 columns = "--columns" in self.myopts
6865                 changelogs=[]
6866                 p=[]
6867                 blockers = []
6868
6869                 counters = PackageCounters()
6870
6871                 if verbosity == 1 and "--verbose" not in self.myopts:
6872                         def create_use_string(*args):
6873                                 return ""
6874                 else:
6875                         def create_use_string(name, cur_iuse, iuse_forced, cur_use,
6876                                 old_iuse, old_use,
6877                                 is_new, reinst_flags,
6878                                 all_flags=(verbosity == 3 or "--quiet" in self.myopts),
6879                                 alphabetical=("--alphabetical" in self.myopts)):
6880                                 enabled = []
6881                                 if alphabetical:
6882                                         disabled = enabled
6883                                         removed = enabled
6884                                 else:
6885                                         disabled = []
6886                                         removed = []
6887                                 cur_iuse = set(cur_iuse)
6888                                 enabled_flags = cur_iuse.intersection(cur_use)
6889                                 removed_iuse = set(old_iuse).difference(cur_iuse)
6890                                 any_iuse = cur_iuse.union(old_iuse)
6891                                 any_iuse = list(any_iuse)
6892                                 any_iuse.sort()
6893                                 for flag in any_iuse:
6894                                         flag_str = None
6895                                         isEnabled = False
6896                                         reinst_flag = reinst_flags and flag in reinst_flags
6897                                         if flag in enabled_flags:
6898                                                 isEnabled = True
6899                                                 if is_new or flag in old_use and \
6900                                                         (all_flags or reinst_flag):
6901                                                         flag_str = red(flag)
6902                                                 elif flag not in old_iuse:
6903                                                         flag_str = yellow(flag) + "%*"
6904                                                 elif flag not in old_use:
6905                                                         flag_str = green(flag) + "*"
6906                                         elif flag in removed_iuse:
6907                                                 if all_flags or reinst_flag:
6908                                                         flag_str = yellow("-" + flag) + "%"
6909                                                         if flag in old_use:
6910                                                                 flag_str += "*"
6911                                                         flag_str = "(" + flag_str + ")"
6912                                                         removed.append(flag_str)
6913                                                 continue
6914                                         else:
6915                                                 if is_new or flag in old_iuse and \
6916                                                         flag not in old_use and \
6917                                                         (all_flags or reinst_flag):
6918                                                         flag_str = blue("-" + flag)
6919                                                 elif flag not in old_iuse:
6920                                                         flag_str = yellow("-" + flag)
6921                                                         if flag not in iuse_forced:
6922                                                                 flag_str += "%"
6923                                                 elif flag in old_use:
6924                                                         flag_str = green("-" + flag) + "*"
6925                                         if flag_str:
6926                                                 if flag in iuse_forced:
6927                                                         flag_str = "(" + flag_str + ")"
6928                                                 if isEnabled:
6929                                                         enabled.append(flag_str)
6930                                                 else:
6931                                                         disabled.append(flag_str)
6932
6933                                 if alphabetical:
6934                                         ret = " ".join(enabled)
6935                                 else:
6936                                         ret = " ".join(enabled + disabled + removed)
6937                                 if ret:
6938                                         ret = '%s="%s" ' % (name, ret)
6939                                 return ret
6940
6941                 repo_display = RepoDisplay(self.roots)
6942
6943                 tree_nodes = []
6944                 display_list = []
6945                 mygraph = self.digraph.copy()
6946
6947                 # If there are any Uninstall instances, add the corresponding
6948                 # blockers to the digraph (useful for --tree display).
6949
6950                 executed_uninstalls = set(node for node in mylist \
6951                         if isinstance(node, Package) and node.operation == "unmerge")
6952
6953                 for uninstall in self._blocker_uninstalls.leaf_nodes():
6954                         uninstall_parents = \
6955                                 self._blocker_uninstalls.parent_nodes(uninstall)
6956                         if not uninstall_parents:
6957                                 continue
6958
6959                         # Remove the corresponding "nomerge" node and substitute
6960                         # the Uninstall node.
6961                         inst_pkg = self._pkg_cache[
6962                                 ("installed", uninstall.root, uninstall.cpv, "nomerge")]
6963                         try:
6964                                 mygraph.remove(inst_pkg)
6965                         except KeyError:
6966                                 pass
6967
6968                         try:
6969                                 inst_pkg_blockers = self._blocker_parents.child_nodes(inst_pkg)
6970                         except KeyError:
6971                                 inst_pkg_blockers = []
6972
6973                         # Break the Package -> Uninstall edges.
6974                         mygraph.remove(uninstall)
6975
6976                         # Resolution of a package's blockers
6977                         # depend on it's own uninstallation.
6978                         for blocker in inst_pkg_blockers:
6979                                 mygraph.add(uninstall, blocker)
6980
6981                         # Expand Package -> Uninstall edges into
6982                         # Package -> Blocker -> Uninstall edges.
6983                         for blocker in uninstall_parents:
6984                                 mygraph.add(uninstall, blocker)
6985                                 for parent in self._blocker_parents.parent_nodes(blocker):
6986                                         if parent != inst_pkg:
6987                                                 mygraph.add(blocker, parent)
6988
6989                         # If the uninstall task did not need to be executed because
6990                         # of an upgrade, display Blocker -> Upgrade edges since the
6991                         # corresponding Blocker -> Uninstall edges will not be shown.
6992                         upgrade_node = \
6993                                 self._slot_pkg_map[uninstall.root].get(uninstall.slot_atom)
6994                         if upgrade_node is not None and \
6995                                 uninstall not in executed_uninstalls:
6996                                 for blocker in uninstall_parents:
6997                                         mygraph.add(upgrade_node, blocker)
6998
6999                 unsatisfied_blockers = []
7000                 i = 0
7001                 depth = 0
7002                 shown_edges = set()
7003                 for x in mylist:
7004                         if isinstance(x, Blocker) and not x.satisfied:
7005                                 unsatisfied_blockers.append(x)
7006                                 continue
7007                         graph_key = x
7008                         if "--tree" in self.myopts:
7009                                 depth = len(tree_nodes)
7010                                 while depth and graph_key not in \
7011                                         mygraph.child_nodes(tree_nodes[depth-1]):
7012                                                 depth -= 1
7013                                 if depth:
7014                                         tree_nodes = tree_nodes[:depth]
7015                                         tree_nodes.append(graph_key)
7016                                         display_list.append((x, depth, True))
7017                                         shown_edges.add((graph_key, tree_nodes[depth-1]))
7018                                 else:
7019                                         traversed_nodes = set() # prevent endless circles
7020                                         traversed_nodes.add(graph_key)
7021                                         def add_parents(current_node, ordered):
7022                                                 parent_nodes = None
7023                                                 # Do not traverse to parents if this node is an
7024                                                 # an argument or a direct member of a set that has
7025                                                 # been specified as an argument (system or world).
7026                                                 if current_node not in self._set_nodes:
7027                                                         parent_nodes = mygraph.parent_nodes(current_node)
7028                                                 if parent_nodes:
7029                                                         child_nodes = set(mygraph.child_nodes(current_node))
7030                                                         selected_parent = None
7031                                                         # First, try to avoid a direct cycle.
7032                                                         for node in parent_nodes:
7033                                                                 if not isinstance(node, (Blocker, Package)):
7034                                                                         continue
7035                                                                 if node not in traversed_nodes and \
7036                                                                         node not in child_nodes:
7037                                                                         edge = (current_node, node)
7038                                                                         if edge in shown_edges:
7039                                                                                 continue
7040                                                                         selected_parent = node
7041                                                                         break
7042                                                         if not selected_parent:
7043                                                                 # A direct cycle is unavoidable.
7044                                                                 for node in parent_nodes:
7045                                                                         if not isinstance(node, (Blocker, Package)):
7046                                                                                 continue
7047                                                                         if node not in traversed_nodes:
7048                                                                                 edge = (current_node, node)
7049                                                                                 if edge in shown_edges:
7050                                                                                         continue
7051                                                                                 selected_parent = node
7052                                                                                 break
7053                                                         if selected_parent:
7054                                                                 shown_edges.add((current_node, selected_parent))
7055                                                                 traversed_nodes.add(selected_parent)
7056                                                                 add_parents(selected_parent, False)
7057                                                 display_list.append((current_node,
7058                                                         len(tree_nodes), ordered))
7059                                                 tree_nodes.append(current_node)
7060                                         tree_nodes = []
7061                                         add_parents(graph_key, True)
7062                         else:
7063                                 display_list.append((x, depth, True))
7064                 mylist = display_list
7065                 for x in unsatisfied_blockers:
7066                         mylist.append((x, 0, True))
7067
7068                 last_merge_depth = 0
7069                 for i in xrange(len(mylist)-1,-1,-1):
7070                         graph_key, depth, ordered = mylist[i]
7071                         if not ordered and depth == 0 and i > 0 \
7072                                 and graph_key == mylist[i-1][0] and \
7073                                 mylist[i-1][1] == 0:
7074                                 # An ordered node got a consecutive duplicate when the tree was
7075                                 # being filled in.
7076                                 del mylist[i]
7077                                 continue
7078                         if ordered and graph_key[-1] != "nomerge":
7079                                 last_merge_depth = depth
7080                                 continue
7081                         if depth >= last_merge_depth or \
7082                                 i < len(mylist) - 1 and \
7083                                 depth >= mylist[i+1][1]:
7084                                         del mylist[i]
7085
7086                 from portage import flatten
7087                 from portage.dep import use_reduce, paren_reduce
7088                 # files to fetch list - avoids counting a same file twice
7089                 # in size display (verbose mode)
7090                 myfetchlist=[]
7091
7092                 # Use this set to detect when all the "repoadd" strings are "[0]"
7093                 # and disable the entire repo display in this case.
7094                 repoadd_set = set()
7095
7096                 for mylist_index in xrange(len(mylist)):
7097                         x, depth, ordered = mylist[mylist_index]
7098                         pkg_type = x[0]
7099                         myroot = x[1]
7100                         pkg_key = x[2]
7101                         portdb = self.trees[myroot]["porttree"].dbapi
7102                         bindb  = self.trees[myroot]["bintree"].dbapi
7103                         vardb = self.trees[myroot]["vartree"].dbapi
7104                         vartree = self.trees[myroot]["vartree"]
7105                         pkgsettings = self.pkgsettings[myroot]
7106
7107                         fetch=" "
7108                         indent = " " * depth
7109
7110                         if isinstance(x, Blocker):
7111                                 if x.satisfied:
7112                                         blocker_style = "PKG_BLOCKER_SATISFIED"
7113                                         addl = "%s  %s  " % (colorize(blocker_style, "b"), fetch)
7114                                 else:
7115                                         blocker_style = "PKG_BLOCKER"
7116                                         addl = "%s  %s  " % (colorize(blocker_style, "B"), fetch)
7117                                 if ordered:
7118                                         counters.blocks += 1
7119                                         if x.satisfied:
7120                                                 counters.blocks_satisfied += 1
7121                                 resolved = portage.key_expand(
7122                                         str(x.atom).lstrip("!"), mydb=vardb, settings=pkgsettings)
7123                                 if "--columns" in self.myopts and "--quiet" in self.myopts:
7124                                         addl += " " + colorize(blocker_style, resolved)
7125                                 else:
7126                                         addl = "[%s %s] %s%s" % \
7127                                                 (colorize(blocker_style, "blocks"),
7128                                                 addl, indent, colorize(blocker_style, resolved))
7129                                 block_parents = self._blocker_parents.parent_nodes(x)
7130                                 block_parents = set([pnode[2] for pnode in block_parents])
7131                                 block_parents = ", ".join(block_parents)
7132                                 if resolved!=x[2]:
7133                                         addl += colorize(blocker_style,
7134                                                 " (\"%s\" is blocking %s)") % \
7135                                                 (str(x.atom).lstrip("!"), block_parents)
7136                                 else:
7137                                         addl += colorize(blocker_style,
7138                                                 " (is blocking %s)") % block_parents
7139                                 if isinstance(x, Blocker) and x.satisfied:
7140                                         if columns:
7141                                                 continue
7142                                         p.append(addl)
7143                                 else:
7144                                         blockers.append(addl)
7145                         else:
7146                                 pkg_status = x[3]
7147                                 pkg_merge = ordered and pkg_status == "merge"
7148                                 if not pkg_merge and pkg_status == "merge":
7149                                         pkg_status = "nomerge"
7150                                 built = pkg_type != "ebuild"
7151                                 installed = pkg_type == "installed"
7152                                 pkg = x
7153                                 metadata = pkg.metadata
7154                                 ebuild_path = None
7155                                 repo_name = metadata["repository"]
7156                                 if pkg_type == "ebuild":
7157                                         ebuild_path = portdb.findname(pkg_key)
7158                                         if not ebuild_path: # shouldn't happen
7159                                                 raise portage.exception.PackageNotFound(pkg_key)
7160                                         repo_path_real = os.path.dirname(os.path.dirname(
7161                                                 os.path.dirname(ebuild_path)))
7162                                 else:
7163                                         repo_path_real = portdb.getRepositoryPath(repo_name)
7164                                 pkg_use = list(pkg.use.enabled)
7165                                 try:
7166                                         restrict = flatten(use_reduce(paren_reduce(
7167                                                 pkg.metadata["RESTRICT"]), uselist=pkg_use))
7168                                 except portage.exception.InvalidDependString, e:
7169                                         if not pkg.installed:
7170                                                 show_invalid_depstring_notice(x,
7171                                                         pkg.metadata["RESTRICT"], str(e))
7172                                                 del e
7173                                                 return 1
7174                                         restrict = []
7175                                 if "ebuild" == pkg_type and x[3] != "nomerge" and \
7176                                         "fetch" in restrict:
7177                                         fetch = red("F")
7178                                         if ordered:
7179                                                 counters.restrict_fetch += 1
7180                                         if portdb.fetch_check(pkg_key, pkg_use):
7181                                                 fetch = green("f")
7182                                                 if ordered:
7183                                                         counters.restrict_fetch_satisfied += 1
7184
7185                                 #we need to use "--emptrytree" testing here rather than "empty" param testing because "empty"
7186                                 #param is used for -u, where you still *do* want to see when something is being upgraded.
7187                                 myoldbest = []
7188                                 myinslotlist = None
7189                                 installed_versions = vardb.match(portage.cpv_getkey(pkg_key))
7190                                 if vardb.cpv_exists(pkg_key):
7191                                         addl="  "+yellow("R")+fetch+"  "
7192                                         if ordered:
7193                                                 if pkg_merge:
7194                                                         counters.reinst += 1
7195                                                 elif pkg_status == "uninstall":
7196                                                         counters.uninst += 1
7197                                 # filter out old-style virtual matches
7198                                 elif installed_versions and \
7199                                         portage.cpv_getkey(installed_versions[0]) == \
7200                                         portage.cpv_getkey(pkg_key):
7201                                         myinslotlist = vardb.match(pkg.slot_atom)
7202                                         # If this is the first install of a new-style virtual, we
7203                                         # need to filter out old-style virtual matches.
7204                                         if myinslotlist and \
7205                                                 portage.cpv_getkey(myinslotlist[0]) != \
7206                                                 portage.cpv_getkey(pkg_key):
7207                                                 myinslotlist = None
7208                                         if myinslotlist:
7209                                                 myoldbest = myinslotlist[:]
7210                                                 addl = "   " + fetch
7211                                                 if not portage.dep.cpvequal(pkg_key,
7212                                                         portage.best([pkg_key] + myoldbest)):
7213                                                         # Downgrade in slot
7214                                                         addl += turquoise("U")+blue("D")
7215                                                         if ordered:
7216                                                                 counters.downgrades += 1
7217                                                 else:
7218                                                         # Update in slot
7219                                                         addl += turquoise("U") + " "
7220                                                         if ordered:
7221                                                                 counters.upgrades += 1
7222                                         else:
7223                                                 # New slot, mark it new.
7224                                                 addl = " " + green("NS") + fetch + "  "
7225                                                 myoldbest = vardb.match(portage.cpv_getkey(pkg_key))
7226                                                 if ordered:
7227                                                         counters.newslot += 1
7228
7229                                         if "--changelog" in self.myopts:
7230                                                 inst_matches = vardb.match(pkg.slot_atom)
7231                                                 if inst_matches:
7232                                                         changelogs.extend(self.calc_changelog(
7233                                                                 portdb.findname(pkg_key),
7234                                                                 inst_matches[0], pkg_key))
7235                                 else:
7236                                         addl = " " + green("N") + " " + fetch + "  "
7237                                         if ordered:
7238                                                 counters.new += 1
7239
7240                                 verboseadd = ""
7241                                 repoadd = None
7242
7243                                 if True:
7244                                         # USE flag display
7245                                         forced_flags = set()
7246                                         pkgsettings.setcpv(pkg) # for package.use.{mask,force}
7247                                         forced_flags.update(pkgsettings.useforce)
7248                                         forced_flags.update(pkgsettings.usemask)
7249
7250                                         cur_use = [flag for flag in pkg.use.enabled \
7251                                                 if flag in pkg.iuse.all]
7252                                         cur_iuse = sorted(pkg.iuse.all)
7253
7254                                         if myoldbest and myinslotlist:
7255                                                 previous_cpv = myoldbest[0]
7256                                         else:
7257                                                 previous_cpv = pkg.cpv
7258                                         if vardb.cpv_exists(previous_cpv):
7259                                                 old_iuse, old_use = vardb.aux_get(
7260                                                                 previous_cpv, ["IUSE", "USE"])
7261                                                 old_iuse = list(set(
7262                                                         filter_iuse_defaults(old_iuse.split())))
7263                                                 old_iuse.sort()
7264                                                 old_use = old_use.split()
7265                                                 is_new = False
7266                                         else:
7267                                                 old_iuse = []
7268                                                 old_use = []
7269                                                 is_new = True
7270
7271                                         old_use = [flag for flag in old_use if flag in old_iuse]
7272
7273                                         use_expand = pkgsettings["USE_EXPAND"].lower().split()
7274                                         use_expand.sort()
7275                                         use_expand.reverse()
7276                                         use_expand_hidden = \
7277                                                 pkgsettings["USE_EXPAND_HIDDEN"].lower().split()
7278
7279                                         def map_to_use_expand(myvals, forcedFlags=False,
7280                                                 removeHidden=True):
7281                                                 ret = {}
7282                                                 forced = {}
7283                                                 for exp in use_expand:
7284                                                         ret[exp] = []
7285                                                         forced[exp] = set()
7286                                                         for val in myvals[:]:
7287                                                                 if val.startswith(exp.lower()+"_"):
7288                                                                         if val in forced_flags:
7289                                                                                 forced[exp].add(val[len(exp)+1:])
7290                                                                         ret[exp].append(val[len(exp)+1:])
7291                                                                         myvals.remove(val)
7292                                                 ret["USE"] = myvals
7293                                                 forced["USE"] = [val for val in myvals \
7294                                                         if val in forced_flags]
7295                                                 if removeHidden:
7296                                                         for exp in use_expand_hidden:
7297                                                                 ret.pop(exp, None)
7298                                                 if forcedFlags:
7299                                                         return ret, forced
7300                                                 return ret
7301
7302                                         # Prevent USE_EXPAND_HIDDEN flags from being hidden if they
7303                                         # are the only thing that triggered reinstallation.
7304                                         reinst_flags_map = {}
7305                                         reinstall_for_flags = self._reinstall_nodes.get(pkg)
7306                                         reinst_expand_map = None
7307                                         if reinstall_for_flags:
7308                                                 reinst_flags_map = map_to_use_expand(
7309                                                         list(reinstall_for_flags), removeHidden=False)
7310                                                 for k in list(reinst_flags_map):
7311                                                         if not reinst_flags_map[k]:
7312                                                                 del reinst_flags_map[k]
7313                                                 if not reinst_flags_map.get("USE"):
7314                                                         reinst_expand_map = reinst_flags_map.copy()
7315                                                         reinst_expand_map.pop("USE", None)
7316                                         if reinst_expand_map and \
7317                                                 not set(reinst_expand_map).difference(
7318                                                 use_expand_hidden):
7319                                                 use_expand_hidden = \
7320                                                         set(use_expand_hidden).difference(
7321                                                         reinst_expand_map)
7322
7323                                         cur_iuse_map, iuse_forced = \
7324                                                 map_to_use_expand(cur_iuse, forcedFlags=True)
7325                                         cur_use_map = map_to_use_expand(cur_use)
7326                                         old_iuse_map = map_to_use_expand(old_iuse)
7327                                         old_use_map = map_to_use_expand(old_use)
7328
7329                                         use_expand.sort()
7330                                         use_expand.insert(0, "USE")
7331                                         
7332                                         for key in use_expand:
7333                                                 if key in use_expand_hidden:
7334                                                         continue
7335                                                 verboseadd += create_use_string(key.upper(),
7336                                                         cur_iuse_map[key], iuse_forced[key],
7337                                                         cur_use_map[key], old_iuse_map[key],
7338                                                         old_use_map[key], is_new,
7339                                                         reinst_flags_map.get(key))
7340
7341                                 if verbosity == 3:
7342                                         # size verbose
7343                                         mysize=0
7344                                         if pkg_type == "ebuild" and pkg_merge:
7345                                                 try:
7346                                                         myfilesdict = portdb.getfetchsizes(pkg_key,
7347                                                                 useflags=pkg_use, debug=self.edebug)
7348                                                 except portage.exception.InvalidDependString, e:
7349                                                         src_uri = portdb.aux_get(pkg_key, ["SRC_URI"])[0]
7350                                                         show_invalid_depstring_notice(x, src_uri, str(e))
7351                                                         del e
7352                                                         return 1
7353                                                 if myfilesdict is None:
7354                                                         myfilesdict="[empty/missing/bad digest]"
7355                                                 else:
7356                                                         for myfetchfile in myfilesdict:
7357                                                                 if myfetchfile not in myfetchlist:
7358                                                                         mysize+=myfilesdict[myfetchfile]
7359                                                                         myfetchlist.append(myfetchfile)
7360                                                         if ordered:
7361                                                                 counters.totalsize += mysize
7362                                                 verboseadd += format_size(mysize)
7363
7364                                         # overlay verbose
7365                                         # assign index for a previous version in the same slot
7366                                         has_previous = False
7367                                         repo_name_prev = None
7368                                         slot_atom = "%s:%s" % (portage.dep_getkey(pkg_key),
7369                                                 metadata["SLOT"])
7370                                         slot_matches = vardb.match(slot_atom)
7371                                         if slot_matches:
7372                                                 has_previous = True
7373                                                 repo_name_prev = vardb.aux_get(slot_matches[0],
7374                                                         ["repository"])[0]
7375
7376                                         # now use the data to generate output
7377                                         if pkg.installed or not has_previous:
7378                                                 repoadd = repo_display.repoStr(repo_path_real)
7379                                         else:
7380                                                 repo_path_prev = None
7381                                                 if repo_name_prev:
7382                                                         repo_path_prev = portdb.getRepositoryPath(
7383                                                                 repo_name_prev)
7384                                                 if repo_path_prev == repo_path_real:
7385                                                         repoadd = repo_display.repoStr(repo_path_real)
7386                                                 else:
7387                                                         repoadd = "%s=>%s" % (
7388                                                                 repo_display.repoStr(repo_path_prev),
7389                                                                 repo_display.repoStr(repo_path_real))
7390                                         if repoadd:
7391                                                 repoadd_set.add(repoadd)
7392
7393                                 xs = [portage.cpv_getkey(pkg_key)] + \
7394                                         list(portage.catpkgsplit(pkg_key)[2:])
7395                                 if xs[2] == "r0":
7396                                         xs[2] = ""
7397                                 else:
7398                                         xs[2] = "-" + xs[2]
7399
7400                                 mywidth = 130
7401                                 if "COLUMNWIDTH" in self.settings:
7402                                         try:
7403                                                 mywidth = int(self.settings["COLUMNWIDTH"])
7404                                         except ValueError, e:
7405                                                 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
7406                                                 portage.writemsg(
7407                                                         "!!! Unable to parse COLUMNWIDTH='%s'\n" % \
7408                                                         self.settings["COLUMNWIDTH"], noiselevel=-1)
7409                                                 del e
7410                                 oldlp = mywidth - 30
7411                                 newlp = oldlp - 30
7412
7413                                 # Convert myoldbest from a list to a string.
7414                                 if not myoldbest:
7415                                         myoldbest = ""
7416                                 else:
7417                                         for pos, key in enumerate(myoldbest):
7418                                                 key = portage.catpkgsplit(key)[2] + \
7419                                                         "-" + portage.catpkgsplit(key)[3]
7420                                                 if key[-3:] == "-r0":
7421                                                         key = key[:-3]
7422                                                 myoldbest[pos] = key
7423                                         myoldbest = blue("["+", ".join(myoldbest)+"]")
7424
7425                                 pkg_cp = xs[0]
7426                                 root_config = self.roots[myroot]
7427                                 system_set = root_config.sets["system"]
7428                                 world_set  = root_config.sets["world"]
7429
7430                                 pkg_system = False
7431                                 pkg_world = False
7432                                 try:
7433                                         pkg_system = system_set.findAtomForPackage(pkg)
7434                                         pkg_world  = world_set.findAtomForPackage(pkg)
7435                                         if not (oneshot or pkg_world) and \
7436                                                 myroot == self.target_root and \
7437                                                 favorites_set.findAtomForPackage(pkg):
7438                                                 # Maybe it will be added to world now.
7439                                                 if create_world_atom(pkg, favorites_set, root_config):
7440                                                         pkg_world = True
7441                                 except portage.exception.InvalidDependString:
7442                                         # This is reported elsewhere if relevant.
7443                                         pass
7444
7445                                 def pkgprint(pkg_str):
7446                                         if pkg_merge:
7447                                                 if pkg_system:
7448                                                         return colorize("PKG_MERGE_SYSTEM", pkg_str)
7449                                                 elif pkg_world:
7450                                                         return colorize("PKG_MERGE_WORLD", pkg_str)
7451                                                 else:
7452                                                         return colorize("PKG_MERGE", pkg_str)
7453                                         elif pkg_status == "uninstall":
7454                                                 return colorize("PKG_UNINSTALL", pkg_str)
7455                                         else:
7456                                                 if pkg_system:
7457                                                         return colorize("PKG_NOMERGE_SYSTEM", pkg_str)
7458                                                 elif pkg_world:
7459                                                         return colorize("PKG_NOMERGE_WORLD", pkg_str)
7460                                                 else:
7461                                                         return colorize("PKG_NOMERGE", pkg_str)
7462
7463                                 try:
7464                                         properties = flatten(use_reduce(paren_reduce(
7465                                                 pkg.metadata["PROPERTIES"]), uselist=pkg.use.enabled))
7466                                 except portage.exception.InvalidDependString, e:
7467                                         if not pkg.installed:
7468                                                 show_invalid_depstring_notice(pkg,
7469                                                         pkg.metadata["PROPERTIES"], str(e))
7470                                                 del e
7471                                                 return 1
7472                                         properties = []
7473                                 interactive = "interactive" in properties
7474                                 if interactive and pkg.operation == "merge":
7475                                         addl = colorize("WARN", "I") + addl[1:]
7476                                         if ordered:
7477                                                 counters.interactive += 1
7478
7479                                 if x[1]!="/":
7480                                         if myoldbest:
7481                                                 myoldbest +=" "
7482                                         if "--columns" in self.myopts:
7483                                                 if "--quiet" in self.myopts:
7484                                                         myprint=addl+" "+indent+pkgprint(pkg_cp)
7485                                                         myprint=myprint+darkblue(" "+xs[1]+xs[2])+" "
7486                                                         myprint=myprint+myoldbest
7487                                                         myprint=myprint+darkgreen("to "+x[1])
7488                                                         verboseadd = None
7489                                                 else:
7490                                                         if not pkg_merge:
7491                                                                 myprint = "[%s] %s%s" % \
7492                                                                         (pkgprint(pkg_status.ljust(13)),
7493                                                                         indent, pkgprint(pkg.cp))
7494                                                         else:
7495                                                                 myprint = "[%s %s] %s%s" % \
7496                                                                         (pkgprint(pkg.type_name), addl,
7497                                                                         indent, pkgprint(pkg.cp))
7498                                                         if (newlp-nc_len(myprint)) > 0:
7499                                                                 myprint=myprint+(" "*(newlp-nc_len(myprint)))
7500                                                         myprint=myprint+"["+darkblue(xs[1]+xs[2])+"] "
7501                                                         if (oldlp-nc_len(myprint)) > 0:
7502                                                                 myprint=myprint+" "*(oldlp-nc_len(myprint))
7503                                                         myprint=myprint+myoldbest
7504                                                         myprint += darkgreen("to " + pkg.root)
7505                                         else:
7506                                                 if not pkg_merge:
7507                                                         myprint = "[%s] " % pkgprint(pkg_status.ljust(13))
7508                                                 else:
7509                                                         myprint = "[" + pkg_type + " " + addl + "] "
7510                                                 myprint += indent + pkgprint(pkg_key) + " " + \
7511                                                         myoldbest + darkgreen("to " + myroot)
7512                                 else:
7513                                         if "--columns" in self.myopts:
7514                                                 if "--quiet" in self.myopts:
7515                                                         myprint=addl+" "+indent+pkgprint(pkg_cp)
7516                                                         myprint=myprint+" "+green(xs[1]+xs[2])+" "
7517                                                         myprint=myprint+myoldbest
7518                                                         verboseadd = None
7519                                                 else:
7520                                                         if not pkg_merge:
7521                                                                 myprint = "[%s] %s%s" % \
7522                                                                         (pkgprint(pkg_status.ljust(13)),
7523                                                                         indent, pkgprint(pkg.cp))
7524                                                         else:
7525                                                                 myprint = "[%s %s] %s%s" % \
7526                                                                         (pkgprint(pkg.type_name), addl,
7527                                                                         indent, pkgprint(pkg.cp))
7528                                                         if (newlp-nc_len(myprint)) > 0:
7529                                                                 myprint=myprint+(" "*(newlp-nc_len(myprint)))
7530                                                         myprint=myprint+green(" ["+xs[1]+xs[2]+"] ")
7531                                                         if (oldlp-nc_len(myprint)) > 0:
7532                                                                 myprint=myprint+(" "*(oldlp-nc_len(myprint)))
7533                                                         myprint += myoldbest
7534                                         else:
7535                                                 if not pkg_merge:
7536                                                         myprint = "[%s] %s%s %s" % \
7537                                                                 (pkgprint(pkg_status.ljust(13)),
7538                                                                 indent, pkgprint(pkg.cpv),
7539                                                                 myoldbest)
7540                                                 else:
7541                                                         myprint = "[%s %s] %s%s %s" % \
7542                                                                 (pkgprint(pkg_type), addl, indent,
7543                                                                 pkgprint(pkg.cpv), myoldbest)
7544
7545                                 if columns and pkg.operation == "uninstall":
7546                                         continue
7547                                 p.append((myprint, verboseadd, repoadd))
7548
7549                                 if "--tree" not in self.myopts and \
7550                                         "--quiet" not in self.myopts and \
7551                                         not self._opts_no_restart.intersection(self.myopts) and \
7552                                         pkg.root == self._running_root.root and \
7553                                         portage.match_from_list(
7554                                         portage.const.PORTAGE_PACKAGE_ATOM, [pkg]) and \
7555                                         not vardb.cpv_exists(pkg.cpv) and \
7556                                         "--quiet" not in self.myopts:
7557                                                 if mylist_index < len(mylist) - 1:
7558                                                         p.append(colorize("WARN", "*** Portage will stop merging at this point and reload itself,"))
7559                                                         p.append(colorize("WARN", "    then resume the merge."))
7560
7561                 out = sys.stdout
7562                 show_repos = repoadd_set and repoadd_set != set(["0"])
7563
7564                 for x in p:
7565                         if isinstance(x, basestring):
7566                                 out.write("%s\n" % (x,))
7567                                 continue
7568
7569                         myprint, verboseadd, repoadd = x
7570
7571                         if verboseadd:
7572                                 myprint += " " + verboseadd
7573
7574                         if show_repos and repoadd:
7575                                 myprint += " " + teal("[%s]" % repoadd)
7576
7577                         out.write("%s\n" % (myprint,))
7578
7579                 for x in blockers:
7580                         print x
7581
7582                 if verbosity == 3:
7583                         print
7584                         print counters
7585                         if show_repos:
7586                                 sys.stdout.write(str(repo_display))
7587
7588                 if "--changelog" in self.myopts:
7589                         print
7590                         for revision,text in changelogs:
7591                                 print bold('*'+revision)
7592                                 sys.stdout.write(text)
7593
7594                 sys.stdout.flush()
7595                 return os.EX_OK
7596
7597         def display_problems(self):
7598                 """
7599                 Display problems with the dependency graph such as slot collisions.
7600                 This is called internally by display() to show the problems _after_
7601                 the merge list where it is most likely to be seen, but if display()
7602                 is not going to be called then this method should be called explicitly
7603                 to ensure that the user is notified of problems with the graph.
7604
7605                 All output goes to stderr, except for unsatisfied dependencies which
7606                 go to stdout for parsing by programs such as autounmask.
7607                 """
7608
7609                 # Note that show_masked_packages() sends it's output to
7610                 # stdout, and some programs such as autounmask parse the
7611                 # output in cases when emerge bails out. However, when
7612                 # show_masked_packages() is called for installed packages
7613                 # here, the message is a warning that is more appropriate
7614                 # to send to stderr, so temporarily redirect stdout to
7615                 # stderr. TODO: Fix output code so there's a cleaner way
7616                 # to redirect everything to stderr.
7617                 sys.stdout.flush()
7618                 sys.stderr.flush()
7619                 stdout = sys.stdout
7620                 try:
7621                         sys.stdout = sys.stderr
7622                         self._display_problems()
7623                 finally:
7624                         sys.stdout = stdout
7625                         sys.stdout.flush()
7626                         sys.stderr.flush()
7627
7628                 # This goes to stdout for parsing by programs like autounmask.
7629                 for pargs, kwargs in self._unsatisfied_deps_for_display:
7630                         self._show_unsatisfied_dep(*pargs, **kwargs)
7631
7632         def _display_problems(self):
7633                 if self._circular_deps_for_display is not None:
7634                         self._show_circular_deps(
7635                                 self._circular_deps_for_display)
7636
7637                 # The user is only notified of a slot conflict if
7638                 # there are no unresolvable blocker conflicts.
7639                 if self._unsatisfied_blockers_for_display is not None:
7640                         self._show_unsatisfied_blockers(
7641                                 self._unsatisfied_blockers_for_display)
7642                 else:
7643                         self._show_slot_collision_notice()
7644
7645                 # TODO: Add generic support for "set problem" handlers so that
7646                 # the below warnings aren't special cases for world only.
7647
7648                 if self._missing_args:
7649                         world_problems = False
7650                         if "world" in self._sets:
7651                                 # Filter out indirect members of world (from nested sets)
7652                                 # since only direct members of world are desired here.
7653                                 world_set = self.roots[self.target_root].sets["world"]
7654                                 for arg, atom in self._missing_args:
7655                                         if arg.name == "world" and atom in world_set:
7656                                                 world_problems = True
7657                                                 break
7658
7659                         if world_problems:
7660                                 sys.stderr.write("\n!!! Problems have been " + \
7661                                         "detected with your world file\n")
7662                                 sys.stderr.write("!!! Please run " + \
7663                                         green("emaint --check world")+"\n\n")
7664
7665                 if self._missing_args:
7666                         sys.stderr.write("\n" + colorize("BAD", "!!!") + \
7667                                 " Ebuilds for the following packages are either all\n")
7668                         sys.stderr.write(colorize("BAD", "!!!") + \
7669                                 " masked or don't exist:\n")
7670                         sys.stderr.write(" ".join(str(atom) for arg, atom in \
7671                                 self._missing_args) + "\n")
7672
7673                 if self._pprovided_args:
7674                         arg_refs = {}
7675                         for arg, atom in self._pprovided_args:
7676                                 if isinstance(arg, SetArg):
7677                                         parent = arg.name
7678                                         arg_atom = (atom, atom)
7679                                 else:
7680                                         parent = "args"
7681                                         arg_atom = (arg.arg, atom)
7682                                 refs = arg_refs.setdefault(arg_atom, [])
7683                                 if parent not in refs:
7684                                         refs.append(parent)
7685                         msg = []
7686                         msg.append(bad("\nWARNING: "))
7687                         if len(self._pprovided_args) > 1:
7688                                 msg.append("Requested packages will not be " + \
7689                                         "merged because they are listed in\n")
7690                         else:
7691                                 msg.append("A requested package will not be " + \
7692                                         "merged because it is listed in\n")
7693                         msg.append("package.provided:\n\n")
7694                         problems_sets = set()
7695                         for (arg, atom), refs in arg_refs.iteritems():
7696                                 ref_string = ""
7697                                 if refs:
7698                                         problems_sets.update(refs)
7699                                         refs.sort()
7700                                         ref_string = ", ".join(["'%s'" % name for name in refs])
7701                                         ref_string = " pulled in by " + ref_string
7702                                 msg.append("  %s%s\n" % (colorize("INFORM", str(arg)), ref_string))
7703                         msg.append("\n")
7704                         if "world" in problems_sets:
7705                                 msg.append("This problem can be solved in one of the following ways:\n\n")
7706                                 msg.append("  A) Use emaint to clean offending packages from world (if not installed).\n")
7707                                 msg.append("  B) Uninstall offending packages (cleans them from world).\n")
7708                                 msg.append("  C) Remove offending entries from package.provided.\n\n")
7709                                 msg.append("The best course of action depends on the reason that an offending\n")
7710                                 msg.append("package.provided entry exists.\n\n")
7711                         sys.stderr.write("".join(msg))
7712
7713                 masked_packages = []
7714                 for pkg in self._masked_installed:
7715                         root_config = pkg.root_config
7716                         pkgsettings = self.pkgsettings[pkg.root]
7717                         mreasons = get_masking_status(pkg, pkgsettings, root_config)
7718                         masked_packages.append((root_config, pkgsettings,
7719                                 pkg.cpv, pkg.metadata, mreasons))
7720                 if masked_packages:
7721                         sys.stderr.write("\n" + colorize("BAD", "!!!") + \
7722                                 " The following installed packages are masked:\n")
7723                         show_masked_packages(masked_packages)
7724                         show_mask_docs()
7725                         print
7726
7727         def calc_changelog(self,ebuildpath,current,next):
7728                 if ebuildpath == None or not os.path.exists(ebuildpath):
7729                         return []
7730                 current = '-'.join(portage.catpkgsplit(current)[1:])
7731                 if current.endswith('-r0'):
7732                         current = current[:-3]
7733                 next = '-'.join(portage.catpkgsplit(next)[1:])
7734                 if next.endswith('-r0'):
7735                         next = next[:-3]
7736                 changelogpath = os.path.join(os.path.split(ebuildpath)[0],'ChangeLog')
7737                 try:
7738                         changelog = open(changelogpath).read()
7739                 except SystemExit, e:
7740                         raise # Needed else can't exit
7741                 except:
7742                         return []
7743                 divisions = self.find_changelog_tags(changelog)
7744                 #print 'XX from',current,'to',next
7745                 #for div,text in divisions: print 'XX',div
7746                 # skip entries for all revisions above the one we are about to emerge
7747                 for i in range(len(divisions)):
7748                         if divisions[i][0]==next:
7749                                 divisions = divisions[i:]
7750                                 break
7751                 # find out how many entries we are going to display
7752                 for i in range(len(divisions)):
7753                         if divisions[i][0]==current:
7754                                 divisions = divisions[:i]
7755                                 break
7756                 else:
7757                     # couldnt find the current revision in the list. display nothing
7758                         return []
7759                 return divisions
7760
7761         def find_changelog_tags(self,changelog):
7762                 divs = []
7763                 release = None
7764                 while 1:
7765                         match = re.search(r'^\*\ ?([-a-zA-Z0-9_.+]*)(?:\ .*)?\n',changelog,re.M)
7766                         if match is None:
7767                                 if release is not None:
7768                                         divs.append((release,changelog))
7769                                 return divs
7770                         if release is not None:
7771                                 divs.append((release,changelog[:match.start()]))
7772                         changelog = changelog[match.end():]
7773                         release = match.group(1)
7774                         if release.endswith('.ebuild'):
7775                                 release = release[:-7]
7776                         if release.endswith('-r0'):
7777                                 release = release[:-3]
7778
7779         def saveNomergeFavorites(self):
7780                 """Find atoms in favorites that are not in the mergelist and add them
7781                 to the world file if necessary."""
7782                 for x in ("--buildpkgonly", "--fetchonly", "--fetch-all-uri",
7783                         "--oneshot", "--onlydeps", "--pretend"):
7784                         if x in self.myopts:
7785                                 return
7786                 root_config = self.roots[self.target_root]
7787                 world_set = root_config.sets["world"]
7788
7789                 world_locked = False
7790                 if hasattr(world_set, "lock"):
7791                         world_set.lock()
7792                         world_locked = True
7793
7794                 if hasattr(world_set, "load"):
7795                         world_set.load() # maybe it's changed on disk
7796
7797                 args_set = self._sets["args"]
7798                 portdb = self.trees[self.target_root]["porttree"].dbapi
7799                 added_favorites = set()
7800                 for x in self._set_nodes:
7801                         pkg_type, root, pkg_key, pkg_status = x
7802                         if pkg_status != "nomerge":
7803                                 continue
7804
7805                         try:
7806                                 myfavkey = create_world_atom(x, args_set, root_config)
7807                                 if myfavkey:
7808                                         if myfavkey in added_favorites:
7809                                                 continue
7810                                         added_favorites.add(myfavkey)
7811                         except portage.exception.InvalidDependString, e:
7812                                 writemsg("\n\n!!! '%s' has invalid PROVIDE: %s\n" % \
7813                                         (pkg_key, str(e)), noiselevel=-1)
7814                                 writemsg("!!! see '%s'\n\n" % os.path.join(
7815                                         root, portage.VDB_PATH, pkg_key, "PROVIDE"), noiselevel=-1)
7816                                 del e
7817                 all_added = []
7818                 for k in self._sets:
7819                         if k in ("args", "world") or not root_config.sets[k].world_candidate:
7820                                 continue
7821                         s = SETPREFIX + k
7822                         if s in world_set:
7823                                 continue
7824                         all_added.append(SETPREFIX + k)
7825                 all_added.extend(added_favorites)
7826                 all_added.sort()
7827                 for a in all_added:
7828                         print ">>> Recording %s in \"world\" favorites file..." % \
7829                                 colorize("INFORM", str(a))
7830                 if all_added:
7831                         world_set.update(all_added)
7832
7833                 if world_locked:
7834                         world_set.unlock()
7835
7836         def loadResumeCommand(self, resume_data, skip_masked=False):
7837                 """
7838                 Add a resume command to the graph and validate it in the process.  This
7839                 will raise a PackageNotFound exception if a package is not available.
7840                 """
7841
7842                 if not isinstance(resume_data, dict):
7843                         return False
7844
7845                 mergelist = resume_data.get("mergelist")
7846                 if not isinstance(mergelist, list):
7847                         mergelist = []
7848
7849                 fakedb = self.mydbapi
7850                 trees = self.trees
7851                 serialized_tasks = []
7852                 masked_tasks = []
7853                 for x in mergelist:
7854                         if not (isinstance(x, list) and len(x) == 4):
7855                                 continue
7856                         pkg_type, myroot, pkg_key, action = x
7857                         if pkg_type not in self.pkg_tree_map:
7858                                 continue
7859                         if action != "merge":
7860                                 continue
7861                         tree_type = self.pkg_tree_map[pkg_type]
7862                         mydb = trees[myroot][tree_type].dbapi
7863                         db_keys = list(self._trees_orig[myroot][
7864                                 tree_type].dbapi._aux_cache_keys)
7865                         try:
7866                                 metadata = izip(db_keys, mydb.aux_get(pkg_key, db_keys))
7867                         except KeyError:
7868                                 # It does no exist or it is corrupt.
7869                                 if action == "uninstall":
7870                                         continue
7871                                 raise portage.exception.PackageNotFound(pkg_key)
7872                         installed = action == "uninstall"
7873                         built = pkg_type != "ebuild"
7874                         root_config = self.roots[myroot]
7875                         pkg = Package(built=built, cpv=pkg_key,
7876                                 installed=installed, metadata=metadata,
7877                                 operation=action, root_config=root_config,
7878                                 type_name=pkg_type)
7879                         if pkg_type == "ebuild":
7880                                 pkgsettings = self.pkgsettings[myroot]
7881                                 pkgsettings.setcpv(pkg)
7882                                 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
7883                         self._pkg_cache[pkg] = pkg
7884
7885                         root_config = self.roots[pkg.root]
7886                         if "merge" == pkg.operation and \
7887                                 not visible(root_config.settings, pkg):
7888                                 if skip_masked:
7889                                         masked_tasks.append(Dependency(root=pkg.root, parent=pkg))
7890                                 else:
7891                                         self._unsatisfied_deps_for_display.append(
7892                                                 ((pkg.root, "="+pkg.cpv), {"myparent":None}))
7893
7894                         fakedb[myroot].cpv_inject(pkg)
7895                         serialized_tasks.append(pkg)
7896                         self.spinner.update()
7897
7898                 if self._unsatisfied_deps_for_display:
7899                         return False
7900
7901                 if not serialized_tasks or "--nodeps" in self.myopts:
7902                         self._serialized_tasks_cache = serialized_tasks
7903                         self._scheduler_graph = self.digraph
7904                 else:
7905                         self._select_package = self._select_pkg_from_graph
7906                         self.myparams.add("selective")
7907
7908                         favorites = resume_data.get("favorites")
7909                         args_set = self._sets["args"]
7910                         if isinstance(favorites, list):
7911                                 args = self._load_favorites(favorites)
7912                         else:
7913                                 args = []
7914
7915                         for task in serialized_tasks:
7916                                 if isinstance(task, Package) and \
7917                                         task.operation == "merge":
7918                                         if not self._add_pkg(task, None):
7919                                                 return False
7920
7921                         # Packages for argument atoms need to be explicitly
7922                         # added via _add_pkg() so that they are included in the
7923                         # digraph (needed at least for --tree display).
7924                         for arg in args:
7925                                 for atom in arg.set:
7926                                         pkg, existing_node = self._select_package(
7927                                                 arg.root_config.root, atom)
7928                                         if existing_node is None and \
7929                                                 pkg is not None:
7930                                                 if not self._add_pkg(pkg, Dependency(atom=atom,
7931                                                         root=pkg.root, parent=arg)):
7932                                                         return False
7933
7934                         # Allow unsatisfied deps here to avoid showing a masking
7935                         # message for an unsatisfied dep that isn't necessarily
7936                         # masked.
7937                         if not self._create_graph(allow_unsatisfied=True):
7938                                 return False
7939                         if masked_tasks or self._unsatisfied_deps:
7940                                 # This probably means that a required package
7941                                 # was dropped via --skipfirst. It makes the
7942                                 # resume list invalid, so convert it to a
7943                                 # UnsatisfiedResumeDep exception.
7944                                 raise self.UnsatisfiedResumeDep(self,
7945                                         masked_tasks + self._unsatisfied_deps)
7946                         self._serialized_tasks_cache = None
7947                         try:
7948                                 self.altlist()
7949                         except self._unknown_internal_error:
7950                                 return False
7951
7952                 return True
7953
7954         def _load_favorites(self, favorites):
7955                 """
7956                 Use a list of favorites to resume state from a
7957                 previous select_files() call. This creates similar
7958                 DependencyArg instances to those that would have
7959                 been created by the original select_files() call.
7960                 This allows Package instances to be matched with
7961                 DependencyArg instances during graph creation.
7962                 """
7963                 root_config = self.roots[self.target_root]
7964                 getSetAtoms = root_config.setconfig.getSetAtoms
7965                 sets = root_config.sets
7966                 args = []
7967                 for x in favorites:
7968                         if not isinstance(x, basestring):
7969                                 continue
7970                         if x in ("system", "world"):
7971                                 x = SETPREFIX + x
7972                         if x.startswith(SETPREFIX):
7973                                 s = x[len(SETPREFIX):]
7974                                 if s not in sets:
7975                                         continue
7976                                 if s in self._sets:
7977                                         continue
7978                                 # Recursively expand sets so that containment tests in
7979                                 # self._get_parent_sets() properly match atoms in nested
7980                                 # sets (like if world contains system).
7981                                 expanded_set = InternalPackageSet(
7982                                         initial_atoms=getSetAtoms(s))
7983                                 self._sets[s] = expanded_set
7984                                 args.append(SetArg(arg=x, set=expanded_set,
7985                                         root_config=root_config))
7986                         else:
7987                                 if not portage.isvalidatom(x):
7988                                         continue
7989                                 args.append(AtomArg(arg=x, atom=x,
7990                                         root_config=root_config))
7991
7992                 # Create the "args" package set from atoms and
7993                 # packages given as arguments.
7994                 args_set = self._sets["args"]
7995                 for arg in args:
7996                         if not isinstance(arg, (AtomArg, PackageArg)):
7997                                 continue
7998                         myatom = arg.atom
7999                         if myatom in args_set:
8000                                 continue
8001                         args_set.add(myatom)
8002                 self._set_atoms.update(chain(*self._sets.itervalues()))
8003                 atom_arg_map = self._atom_arg_map
8004                 for arg in args:
8005                         for atom in arg.set:
8006                                 atom_key = (atom, arg.root_config.root)
8007                                 refs = atom_arg_map.get(atom_key)
8008                                 if refs is None:
8009                                         refs = []
8010                                         atom_arg_map[atom_key] = refs
8011                                         if arg not in refs:
8012                                                 refs.append(arg)
8013                 return args
8014
8015         class UnsatisfiedResumeDep(portage.exception.PortageException):
8016                 """
8017                 A dependency of a resume list is not installed. This
8018                 can occur when a required package is dropped from the
8019                 merge list via --skipfirst.
8020                 """
8021                 def __init__(self, depgraph, value):
8022                         portage.exception.PortageException.__init__(self, value)
8023                         self.depgraph = depgraph
8024
8025         class _internal_exception(portage.exception.PortageException):
8026                 def __init__(self, value=""):
8027                         portage.exception.PortageException.__init__(self, value)
8028
8029         class _unknown_internal_error(_internal_exception):
8030                 """
8031                 Used by the depgraph internally to terminate graph creation.
8032                 The specific reason for the failure should have been dumped
8033                 to stderr, unfortunately, the exact reason for the failure
8034                 may not be known.
8035                 """
8036
8037         class _serialize_tasks_retry(_internal_exception):
8038                 """
8039                 This is raised by the _serialize_tasks() method when it needs to
8040                 be called again for some reason. The only case that it's currently
8041                 used for is when neglected dependencies need to be added to the
8042                 graph in order to avoid making a potentially unsafe decision.
8043                 """
8044
8045         class _dep_check_composite_db(portage.dbapi):
8046                 """
8047                 A dbapi-like interface that is optimized for use in dep_check() calls.
8048                 This is built on top of the existing depgraph package selection logic.
8049                 Some packages that have been added to the graph may be masked from this
8050                 view in order to influence the atom preference selection that occurs
8051                 via dep_check().
8052                 """
8053                 def __init__(self, depgraph, root):
8054                         portage.dbapi.__init__(self)
8055                         self._depgraph = depgraph
8056                         self._root = root
8057                         self._match_cache = {}
8058                         self._cpv_pkg_map = {}
8059
8060                 def match(self, atom):
8061                         ret = self._match_cache.get(atom)
8062                         if ret is not None:
8063                                 return ret[:]
8064                         orig_atom = atom
8065                         if "/" not in atom:
8066                                 atom = self._dep_expand(atom)
8067                         pkg, existing = self._depgraph._select_package(self._root, atom)
8068                         if not pkg:
8069                                 ret = []
8070                         else:
8071                                 # Return the highest available from select_package() as well as
8072                                 # any matching slots in the graph db.
8073                                 slots = set()
8074                                 slots.add(pkg.metadata["SLOT"])
8075                                 atom_cp = portage.dep_getkey(atom)
8076                                 if pkg.cp.startswith("virtual/"):
8077                                         # For new-style virtual lookahead that occurs inside
8078                                         # dep_check(), examine all slots. This is needed
8079                                         # so that newer slots will not unnecessarily be pulled in
8080                                         # when a satisfying lower slot is already installed. For
8081                                         # example, if virtual/jdk-1.4 is satisfied via kaffe then
8082                                         # there's no need to pull in a newer slot to satisfy a
8083                                         # virtual/jdk dependency.
8084                                         for db, pkg_type, built, installed, db_keys in \
8085                                                 self._depgraph._filtered_trees[self._root]["dbs"]:
8086                                                 for cpv in db.match(atom):
8087                                                         if portage.cpv_getkey(cpv) != pkg.cp:
8088                                                                 continue
8089                                                         slots.add(db.aux_get(cpv, ["SLOT"])[0])
8090                                 ret = []
8091                                 if self._visible(pkg):
8092                                         self._cpv_pkg_map[pkg.cpv] = pkg
8093                                         ret.append(pkg.cpv)
8094                                 slots.remove(pkg.metadata["SLOT"])
8095                                 while slots:
8096                                         slot_atom = "%s:%s" % (atom_cp, slots.pop())
8097                                         pkg, existing = self._depgraph._select_package(
8098                                                 self._root, slot_atom)
8099                                         if not pkg:
8100                                                 continue
8101                                         if not self._visible(pkg):
8102                                                 continue
8103                                         self._cpv_pkg_map[pkg.cpv] = pkg
8104                                         ret.append(pkg.cpv)
8105                                 if ret:
8106                                         self._cpv_sort_ascending(ret)
8107                         self._match_cache[orig_atom] = ret
8108                         return ret[:]
8109
8110                 def _visible(self, pkg):
8111                         if pkg.installed and "selective" not in self._depgraph.myparams:
8112                                 try:
8113                                         arg = self._depgraph._iter_atoms_for_pkg(pkg).next()
8114                                 except (StopIteration, portage.exception.InvalidDependString):
8115                                         arg = None
8116                                 if arg:
8117                                         return False
8118                                 if pkg.installed:
8119                                         try:
8120                                                 if not visible(
8121                                                         self._depgraph.pkgsettings[pkg.root], pkg):
8122                                                         return False
8123                                         except portage.exception.InvalidDependString:
8124                                                 pass
8125                         return True
8126
8127                 def _dep_expand(self, atom):
8128                         """
8129                         This is only needed for old installed packages that may
8130                         contain atoms that are not fully qualified with a specific
8131                         category. Emulate the cpv_expand() function that's used by
8132                         dbapi.match() in cases like this. If there are multiple
8133                         matches, it's often due to a new-style virtual that has
8134                         been added, so try to filter those out to avoid raising
8135                         a ValueError.
8136                         """
8137                         root_config = self._depgraph.roots[self._root]
8138                         orig_atom = atom
8139                         expanded_atoms = self._depgraph._dep_expand(root_config, atom)
8140                         if len(expanded_atoms) > 1:
8141                                 non_virtual_atoms = []
8142                                 for x in expanded_atoms:
8143                                         if not portage.dep_getkey(x).startswith("virtual/"):
8144                                                 non_virtual_atoms.append(x)
8145                                 if len(non_virtual_atoms) == 1:
8146                                         expanded_atoms = non_virtual_atoms
8147                         if len(expanded_atoms) > 1:
8148                                 # compatible with portage.cpv_expand()
8149                                 raise portage.exception.AmbiguousPackageName(
8150                                         [portage.dep_getkey(x) for x in expanded_atoms])
8151                         if expanded_atoms:
8152                                 atom = expanded_atoms[0]
8153                         else:
8154                                 null_atom = insert_category_into_atom(atom, "null")
8155                                 null_cp = portage.dep_getkey(null_atom)
8156                                 cat, atom_pn = portage.catsplit(null_cp)
8157                                 virts_p = root_config.settings.get_virts_p().get(atom_pn)
8158                                 if virts_p:
8159                                         # Allow the resolver to choose which virtual.
8160                                         atom = insert_category_into_atom(atom, "virtual")
8161                                 else:
8162                                         atom = insert_category_into_atom(atom, "null")
8163                         return atom
8164
8165                 def aux_get(self, cpv, wants):
8166                         metadata = self._cpv_pkg_map[cpv].metadata
8167                         return [metadata.get(x, "") for x in wants]
8168
8169         class _package_cache(dict):
8170                 def __init__(self, depgraph):
8171                         dict.__init__(self)
8172                         self._depgraph = depgraph
8173
8174                 def __setitem__(self, k, v):
8175                         dict.__setitem__(self, k, v)
8176                         root_config = self._depgraph.roots[v.root]
8177                         try:
8178                                 if visible(root_config.settings, v) and \
8179                                         not (v.installed and \
8180                                         v.root_config.settings._getMissingKeywords(v.cpv, v.metadata)):
8181                                         root_config.visible_pkgs.cpv_inject(v)
8182                         except portage.exception.InvalidDependString:
8183                                 pass
8184
8185 class RepoDisplay(object):
8186         def __init__(self, roots):
8187                 self._shown_repos = {}
8188                 self._unknown_repo = False
8189                 repo_paths = set()
8190                 for root_config in roots.itervalues():
8191                         portdir = root_config.settings.get("PORTDIR")
8192                         if portdir:
8193                                 repo_paths.add(portdir)
8194                         overlays = root_config.settings.get("PORTDIR_OVERLAY")
8195                         if overlays:
8196                                 repo_paths.update(overlays.split())
8197                 repo_paths = list(repo_paths)
8198                 self._repo_paths = repo_paths
8199                 self._repo_paths_real = [ os.path.realpath(repo_path) \
8200                         for repo_path in repo_paths ]
8201
8202                 # pre-allocate index for PORTDIR so that it always has index 0.
8203                 for root_config in roots.itervalues():
8204                         portdb = root_config.trees["porttree"].dbapi
8205                         portdir = portdb.porttree_root
8206                         if portdir:
8207                                 self.repoStr(portdir)
8208
8209         def repoStr(self, repo_path_real):
8210                 real_index = -1
8211                 if repo_path_real:
8212                         real_index = self._repo_paths_real.index(repo_path_real)
8213                 if real_index == -1:
8214                         s = "?"
8215                         self._unknown_repo = True
8216                 else:
8217                         shown_repos = self._shown_repos
8218                         repo_paths = self._repo_paths
8219                         repo_path = repo_paths[real_index]
8220                         index = shown_repos.get(repo_path)
8221                         if index is None:
8222                                 index = len(shown_repos)
8223                                 shown_repos[repo_path] = index
8224                         s = str(index)
8225                 return s
8226
8227         def __str__(self):
8228                 output = []
8229                 shown_repos = self._shown_repos
8230                 unknown_repo = self._unknown_repo
8231                 if shown_repos or self._unknown_repo:
8232                         output.append("Portage tree and overlays:\n")
8233                 show_repo_paths = list(shown_repos)
8234                 for repo_path, repo_index in shown_repos.iteritems():
8235                         show_repo_paths[repo_index] = repo_path
8236                 if show_repo_paths:
8237                         for index, repo_path in enumerate(show_repo_paths):
8238                                 output.append(" "+teal("["+str(index)+"]")+" %s\n" % repo_path)
8239                 if unknown_repo:
8240                         output.append(" "+teal("[?]") + \
8241                                 " indicates that the source repository could not be determined\n")
8242                 return "".join(output)
8243
8244 class PackageCounters(object):
8245
8246         def __init__(self):
8247                 self.upgrades   = 0
8248                 self.downgrades = 0
8249                 self.new        = 0
8250                 self.newslot    = 0
8251                 self.reinst     = 0
8252                 self.uninst     = 0
8253                 self.blocks     = 0
8254                 self.blocks_satisfied         = 0
8255                 self.totalsize  = 0
8256                 self.restrict_fetch           = 0
8257                 self.restrict_fetch_satisfied = 0
8258                 self.interactive              = 0
8259
8260         def __str__(self):
8261                 total_installs = self.upgrades + self.downgrades + self.newslot + self.new + self.reinst
8262                 myoutput = []
8263                 details = []
8264                 myoutput.append("Total: %s package" % total_installs)
8265                 if total_installs != 1:
8266                         myoutput.append("s")
8267                 if total_installs != 0:
8268                         myoutput.append(" (")
8269                 if self.upgrades > 0:
8270                         details.append("%s upgrade" % self.upgrades)
8271                         if self.upgrades > 1:
8272                                 details[-1] += "s"
8273                 if self.downgrades > 0:
8274                         details.append("%s downgrade" % self.downgrades)
8275                         if self.downgrades > 1:
8276                                 details[-1] += "s"
8277                 if self.new > 0:
8278                         details.append("%s new" % self.new)
8279                 if self.newslot > 0:
8280                         details.append("%s in new slot" % self.newslot)
8281                         if self.newslot > 1:
8282                                 details[-1] += "s"
8283                 if self.reinst > 0:
8284                         details.append("%s reinstall" % self.reinst)
8285                         if self.reinst > 1:
8286                                 details[-1] += "s"
8287                 if self.uninst > 0:
8288                         details.append("%s uninstall" % self.uninst)
8289                         if self.uninst > 1:
8290                                 details[-1] += "s"
8291                 if self.interactive > 0:
8292                         details.append("%s %s" % (self.interactive,
8293                                 colorize("WARN", "interactive")))
8294                 myoutput.append(", ".join(details))
8295                 if total_installs != 0:
8296                         myoutput.append(")")
8297                 myoutput.append(", Size of downloads: %s" % format_size(self.totalsize))
8298                 if self.restrict_fetch:
8299                         myoutput.append("\nFetch Restriction: %s package" % \
8300                                 self.restrict_fetch)
8301                         if self.restrict_fetch > 1:
8302                                 myoutput.append("s")
8303                 if self.restrict_fetch_satisfied < self.restrict_fetch:
8304                         myoutput.append(bad(" (%s unsatisfied)") % \
8305                                 (self.restrict_fetch - self.restrict_fetch_satisfied))
8306                 if self.blocks > 0:
8307                         myoutput.append("\nConflict: %s block" % \
8308                                 self.blocks)
8309                         if self.blocks > 1:
8310                                 myoutput.append("s")
8311                         if self.blocks_satisfied < self.blocks:
8312                                 myoutput.append(bad(" (%s unsatisfied)") % \
8313                                         (self.blocks - self.blocks_satisfied))
8314                 return "".join(myoutput)
8315
8316 class PollConstants(object):
8317
8318         """
8319         Provides POLL* constants that are equivalent to those from the
8320         select module, for use by PollSelectAdapter.
8321         """
8322
8323         names = ("POLLIN", "POLLPRI", "POLLOUT", "POLLERR", "POLLHUP", "POLLNVAL")
8324         v = 1
8325         for k in names:
8326                 locals()[k] = getattr(select, k, v)
8327                 v *= 2
8328         del k, v
8329
8330 class PollSelectAdapter(PollConstants):
8331
8332         """
8333         Use select to emulate a poll object, for
8334         systems that don't support poll().
8335         """
8336
8337         def __init__(self):
8338                 self._registered = {}
8339                 self._select_args = [[], [], []]
8340
8341         def register(self, fd, *args):
8342                 """
8343                 Only POLLIN is currently supported!
8344                 """
8345                 if len(args) > 1:
8346                         raise TypeError(
8347                                 "register expected at most 2 arguments, got " + \
8348                                 repr(1 + len(args)))
8349
8350                 eventmask = PollConstants.POLLIN | \
8351                         PollConstants.POLLPRI | PollConstants.POLLOUT
8352                 if args:
8353                         eventmask = args[0]
8354
8355                 self._registered[fd] = eventmask
8356                 self._select_args = None
8357
8358         def unregister(self, fd):
8359                 self._select_args = None
8360                 del self._registered[fd]
8361
8362         def poll(self, *args):
8363                 if len(args) > 1:
8364                         raise TypeError(
8365                                 "poll expected at most 2 arguments, got " + \
8366                                 repr(1 + len(args)))
8367
8368                 timeout = None
8369                 if args:
8370                         timeout = args[0]
8371
8372                 select_args = self._select_args
8373                 if select_args is None:
8374                         select_args = [self._registered.keys(), [], []]
8375
8376                 if timeout is not None:
8377                         select_args = select_args[:]
8378                         # Translate poll() timeout args to select() timeout args:
8379                         #
8380                         #          | units        | value(s) for indefinite block
8381                         # ---------|--------------|------------------------------
8382                         #   poll   | milliseconds | omitted, negative, or None
8383                         # ---------|--------------|------------------------------
8384                         #   select | seconds      | omitted
8385                         # ---------|--------------|------------------------------
8386
8387                         if timeout is not None and timeout < 0:
8388                                 timeout = None
8389                         if timeout is not None:
8390                                 select_args.append(timeout / 1000)
8391
8392                 select_events = select.select(*select_args)
8393                 poll_events = []
8394                 for fd in select_events[0]:
8395                         poll_events.append((fd, PollConstants.POLLIN))
8396                 return poll_events
8397
8398 class SequentialTaskQueue(SlotObject):
8399
8400         __slots__ = ("max_jobs", "running_tasks") + \
8401                 ("_dirty", "_scheduling", "_task_queue")
8402
8403         def __init__(self, **kwargs):
8404                 SlotObject.__init__(self, **kwargs)
8405                 self._task_queue = deque()
8406                 self.running_tasks = set()
8407                 if self.max_jobs is None:
8408                         self.max_jobs = 1
8409                 self._dirty = True
8410
8411         def add(self, task):
8412                 self._task_queue.append(task)
8413                 self._dirty = True
8414
8415         def addFront(self, task):
8416                 self._task_queue.appendleft(task)
8417                 self._dirty = True
8418
8419         def schedule(self):
8420
8421                 if not self._dirty:
8422                         return False
8423
8424                 if not self:
8425                         return False
8426
8427                 if self._scheduling:
8428                         # Ignore any recursive schedule() calls triggered via
8429                         # self._task_exit().
8430                         return False
8431
8432                 self._scheduling = True
8433
8434                 task_queue = self._task_queue
8435                 running_tasks = self.running_tasks
8436                 max_jobs = self.max_jobs
8437                 state_changed = False
8438
8439                 while task_queue and \
8440                         (max_jobs is True or len(running_tasks) < max_jobs):
8441                         task = task_queue.popleft()
8442                         cancelled = getattr(task, "cancelled", None)
8443                         if not cancelled:
8444                                 running_tasks.add(task)
8445                                 task.addExitListener(self._task_exit)
8446                                 task.start()
8447                         state_changed = True
8448
8449                 self._dirty = False
8450                 self._scheduling = False
8451
8452                 return state_changed
8453
8454         def _task_exit(self, task):
8455                 """
8456                 Since we can always rely on exit listeners being called, the set of
8457                 running tasks is always pruned automatically and there is never any need
8458                 to actively prune it.
8459                 """
8460                 self.running_tasks.remove(task)
8461                 if self._task_queue:
8462                         self._dirty = True
8463
8464         def clear(self):
8465                 self._task_queue.clear()
8466                 running_tasks = self.running_tasks
8467                 while running_tasks:
8468                         task = running_tasks.pop()
8469                         task.removeExitListener(self._task_exit)
8470                         task.cancel()
8471                 self._dirty = False
8472
8473         def __nonzero__(self):
8474                 return bool(self._task_queue or self.running_tasks)
8475
8476         def __len__(self):
8477                 return len(self._task_queue) + len(self.running_tasks)
8478
8479 _can_poll_device = None
8480
8481 def can_poll_device():
8482         """
8483         Test if it's possible to use poll() on a device such as a pty. This
8484         is known to fail on Darwin.
8485         @rtype: bool
8486         @returns: True if poll() on a device succeeds, False otherwise.
8487         """
8488
8489         global _can_poll_device
8490         if _can_poll_device is not None:
8491                 return _can_poll_device
8492
8493         if not hasattr(select, "poll"):
8494                 _can_poll_device = False
8495                 return _can_poll_device
8496
8497         try:
8498                 dev_null = open('/dev/null', 'rb')
8499         except IOError:
8500                 _can_poll_device = False
8501                 return _can_poll_device
8502
8503         p = select.poll()
8504         p.register(dev_null.fileno(), PollConstants.POLLIN)
8505
8506         invalid_request = False
8507         for f, event in p.poll():
8508                 if event & PollConstants.POLLNVAL:
8509                         invalid_request = True
8510                         break
8511         dev_null.close()
8512
8513         _can_poll_device = not invalid_request
8514         return _can_poll_device
8515
8516 def create_poll_instance():
8517         """
8518         Create an instance of select.poll, or an instance of
8519         PollSelectAdapter there is no poll() implementation or
8520         it is broken somehow.
8521         """
8522         if can_poll_device():
8523                 return select.poll()
8524         return PollSelectAdapter()
8525
8526 class PollScheduler(object):
8527
8528         class _sched_iface_class(SlotObject):
8529                 __slots__ = ("register", "schedule", "unregister")
8530
8531         def __init__(self):
8532                 self._max_jobs = 1
8533                 self._max_load = None
8534                 self._jobs = 0
8535                 self._poll_event_queue = []
8536                 self._poll_event_handlers = {}
8537                 self._poll_event_handler_ids = {}
8538                 # Increment id for each new handler.
8539                 self._event_handler_id = 0
8540                 self._poll_obj = create_poll_instance()
8541                 self._scheduling = False
8542
8543         def _schedule(self):
8544                 """
8545                 Calls _schedule_tasks() and automatically returns early from
8546                 any recursive calls to this method that the _schedule_tasks()
8547                 call might trigger. This makes _schedule() safe to call from
8548                 inside exit listeners.
8549                 """
8550                 if self._scheduling:
8551                         return False
8552                 self._scheduling = True
8553                 try:
8554                         return self._schedule_tasks()
8555                 finally:
8556                         self._scheduling = False
8557
8558         def _running_job_count(self):
8559                 return self._jobs
8560
8561         def _can_add_job(self):
8562                 max_jobs = self._max_jobs
8563                 max_load = self._max_load
8564
8565                 if self._max_jobs is not True and \
8566                         self._running_job_count() >= self._max_jobs:
8567                         return False
8568
8569                 if max_load is not None and \
8570                         (max_jobs is True or max_jobs > 1) and \
8571                         self._running_job_count() >= 1:
8572                         try:
8573                                 avg1, avg5, avg15 = os.getloadavg()
8574                         except (AttributeError, OSError), e:
8575                                 writemsg("!!! getloadavg() failed: %s\n" % (e,),
8576                                         noiselevel=-1)
8577                                 del e
8578                                 return False
8579
8580                         if avg1 >= max_load:
8581                                 return False
8582
8583                 return True
8584
8585         def _poll(self, timeout=None):
8586                 """
8587                 All poll() calls pass through here. The poll events
8588                 are added directly to self._poll_event_queue.
8589                 In order to avoid endless blocking, this raises
8590                 StopIteration if timeout is None and there are
8591                 no file descriptors to poll.
8592                 """
8593                 if not self._poll_event_handlers:
8594                         self._schedule()
8595                         if timeout is None and \
8596                                 not self._poll_event_handlers:
8597                                 raise StopIteration(
8598                                         "timeout is None and there are no poll() event handlers")
8599
8600                 # The following error is known to occur with Linux kernel versions
8601                 # less than 2.6.24:
8602                 #
8603                 #   select.error: (4, 'Interrupted system call')
8604                 #
8605                 # This error has been observed after a SIGSTOP, followed by SIGCONT.
8606                 # Treat it similar to EAGAIN if timeout is None, otherwise just return
8607                 # without any events.
8608                 while True:
8609                         try:
8610                                 self._poll_event_queue.extend(self._poll_obj.poll(timeout))
8611                                 break
8612                         except select.error, e:
8613                                 writemsg_level("\n!!! select error: %s\n" % (e,),
8614                                         level=logging.ERROR, noiselevel=-1)
8615                                 del e
8616                                 if timeout is not None:
8617                                         break
8618
8619         def _next_poll_event(self, timeout=None):
8620                 """
8621                 Since the _schedule_wait() loop is called by event
8622                 handlers from _poll_loop(), maintain a central event
8623                 queue for both of them to share events from a single
8624                 poll() call. In order to avoid endless blocking, this
8625                 raises StopIteration if timeout is None and there are
8626                 no file descriptors to poll.
8627                 """
8628                 if not self._poll_event_queue:
8629                         self._poll(timeout)
8630                 return self._poll_event_queue.pop()
8631
8632         def _poll_loop(self):
8633
8634                 event_handlers = self._poll_event_handlers
8635                 event_handled = False
8636
8637                 try:
8638                         while event_handlers:
8639                                 f, event = self._next_poll_event()
8640                                 handler, reg_id = event_handlers[f]
8641                                 handler(f, event)
8642                                 event_handled = True
8643                 except StopIteration:
8644                         event_handled = True
8645
8646                 if not event_handled:
8647                         raise AssertionError("tight loop")
8648
8649         def _schedule_yield(self):
8650                 """
8651                 Schedule for a short period of time chosen by the scheduler based
8652                 on internal state. Synchronous tasks should call this periodically
8653                 in order to allow the scheduler to service pending poll events. The
8654                 scheduler will call poll() exactly once, without blocking, and any
8655                 resulting poll events will be serviced.
8656                 """
8657                 event_handlers = self._poll_event_handlers
8658                 events_handled = 0
8659
8660                 if not event_handlers:
8661                         return bool(events_handled)
8662
8663                 if not self._poll_event_queue:
8664                         self._poll(0)
8665
8666                 try:
8667                         while event_handlers and self._poll_event_queue:
8668                                 f, event = self._next_poll_event()
8669                                 handler, reg_id = event_handlers[f]
8670                                 handler(f, event)
8671                                 events_handled += 1
8672                 except StopIteration:
8673                         events_handled += 1
8674
8675                 return bool(events_handled)
8676
8677         def _register(self, f, eventmask, handler):
8678                 """
8679                 @rtype: Integer
8680                 @return: A unique registration id, for use in schedule() or
8681                         unregister() calls.
8682                 """
8683                 if f in self._poll_event_handlers:
8684                         raise AssertionError("fd %d is already registered" % f)
8685                 self._event_handler_id += 1
8686                 reg_id = self._event_handler_id
8687                 self._poll_event_handler_ids[reg_id] = f
8688                 self._poll_event_handlers[f] = (handler, reg_id)
8689                 self._poll_obj.register(f, eventmask)
8690                 return reg_id
8691
8692         def _unregister(self, reg_id):
8693                 f = self._poll_event_handler_ids[reg_id]
8694                 self._poll_obj.unregister(f)
8695                 del self._poll_event_handlers[f]
8696                 del self._poll_event_handler_ids[reg_id]
8697
8698         def _schedule_wait(self, wait_ids):
8699                 """
8700                 Schedule until wait_id is not longer registered
8701                 for poll() events.
8702                 @type wait_id: int
8703                 @param wait_id: a task id to wait for
8704                 """
8705                 event_handlers = self._poll_event_handlers
8706                 handler_ids = self._poll_event_handler_ids
8707                 event_handled = False
8708
8709                 if isinstance(wait_ids, int):
8710                         wait_ids = frozenset([wait_ids])
8711
8712                 try:
8713                         while wait_ids.intersection(handler_ids):
8714                                 f, event = self._next_poll_event()
8715                                 handler, reg_id = event_handlers[f]
8716                                 handler(f, event)
8717                                 event_handled = True
8718                 except StopIteration:
8719                         event_handled = True
8720
8721                 return event_handled
8722
8723 class QueueScheduler(PollScheduler):
8724
8725         """
8726         Add instances of SequentialTaskQueue and then call run(). The
8727         run() method returns when no tasks remain.
8728         """
8729
8730         def __init__(self, max_jobs=None, max_load=None):
8731                 PollScheduler.__init__(self)
8732
8733                 if max_jobs is None:
8734                         max_jobs = 1
8735
8736                 self._max_jobs = max_jobs
8737                 self._max_load = max_load
8738                 self.sched_iface = self._sched_iface_class(
8739                         register=self._register,
8740                         schedule=self._schedule_wait,
8741                         unregister=self._unregister)
8742
8743                 self._queues = []
8744                 self._schedule_listeners = []
8745
8746         def add(self, q):
8747                 self._queues.append(q)
8748
8749         def remove(self, q):
8750                 self._queues.remove(q)
8751
8752         def run(self):
8753
8754                 while self._schedule():
8755                         self._poll_loop()
8756
8757                 while self._running_job_count():
8758                         self._poll_loop()
8759
8760         def _schedule_tasks(self):
8761                 """
8762                 @rtype: bool
8763                 @returns: True if there may be remaining tasks to schedule,
8764                         False otherwise.
8765                 """
8766                 while self._can_add_job():
8767                         n = self._max_jobs - self._running_job_count()
8768                         if n < 1:
8769                                 break
8770
8771                         if not self._start_next_job(n):
8772                                 return False
8773
8774                 for q in self._queues:
8775                         if q:
8776                                 return True
8777                 return False
8778
8779         def _running_job_count(self):
8780                 job_count = 0
8781                 for q in self._queues:
8782                         job_count += len(q.running_tasks)
8783                 self._jobs = job_count
8784                 return job_count
8785
8786         def _start_next_job(self, n=1):
8787                 started_count = 0
8788                 for q in self._queues:
8789                         initial_job_count = len(q.running_tasks)
8790                         q.schedule()
8791                         final_job_count = len(q.running_tasks)
8792                         if final_job_count > initial_job_count:
8793                                 started_count += (final_job_count - initial_job_count)
8794                         if started_count >= n:
8795                                 break
8796                 return started_count
8797
8798 class TaskScheduler(object):
8799
8800         """
8801         A simple way to handle scheduling of AsynchrousTask instances. Simply
8802         add tasks and call run(). The run() method returns when no tasks remain.
8803         """
8804
8805         def __init__(self, max_jobs=None, max_load=None):
8806                 self._queue = SequentialTaskQueue(max_jobs=max_jobs)
8807                 self._scheduler = QueueScheduler(
8808                         max_jobs=max_jobs, max_load=max_load)
8809                 self.sched_iface = self._scheduler.sched_iface
8810                 self.run = self._scheduler.run
8811                 self._scheduler.add(self._queue)
8812
8813         def add(self, task):
8814                 self._queue.add(task)
8815
8816         def run(self):
8817                 self._scheduler.schedule()
8818
8819 class JobStatusDisplay(object):
8820
8821         _bound_properties = ("curval", "failed", "running")
8822         _jobs_column_width = 48
8823
8824         # Don't update the display unless at least this much
8825         # time has passed, in units of seconds.
8826         _min_display_latency = 2
8827
8828         _default_term_codes = {
8829                 'cr'  : '\r',
8830                 'el'  : '\x1b[K',
8831                 'nel' : '\n',
8832         }
8833
8834         _termcap_name_map = {
8835                 'carriage_return' : 'cr',
8836                 'clr_eol'         : 'el',
8837                 'newline'         : 'nel',
8838         }
8839
8840         def __init__(self, out=sys.stdout, quiet=False):
8841                 object.__setattr__(self, "out", out)
8842                 object.__setattr__(self, "quiet", quiet)
8843                 object.__setattr__(self, "maxval", 0)
8844                 object.__setattr__(self, "merges", 0)
8845                 object.__setattr__(self, "_changed", False)
8846                 object.__setattr__(self, "_displayed", False)
8847                 object.__setattr__(self, "_last_display_time", 0)
8848                 object.__setattr__(self, "width", 80)
8849                 self.reset()
8850
8851                 isatty = hasattr(out, "isatty") and out.isatty()
8852                 object.__setattr__(self, "_isatty", isatty)
8853                 if not isatty or not self._init_term():
8854                         term_codes = {}
8855                         for k, capname in self._termcap_name_map.iteritems():
8856                                 term_codes[k] = self._default_term_codes[capname]
8857                         object.__setattr__(self, "_term_codes", term_codes)
8858
8859         def _init_term(self):
8860                 """
8861                 Initialize term control codes.
8862                 @rtype: bool
8863                 @returns: True if term codes were successfully initialized,
8864                         False otherwise.
8865                 """
8866
8867                 term_type = os.environ.get("TERM", "vt100")
8868                 tigetstr = None
8869
8870                 try:
8871                         import curses
8872                         try:
8873                                 curses.setupterm(term_type, self.out.fileno())
8874                                 tigetstr = curses.tigetstr
8875                         except curses.error:
8876                                 pass
8877                 except ImportError:
8878                         pass
8879
8880                 if tigetstr is None:
8881                         return False
8882
8883                 term_codes = {}
8884                 for k, capname in self._termcap_name_map.iteritems():
8885                         code = tigetstr(capname)
8886                         if code is None:
8887                                 code = self._default_term_codes[capname]
8888                         term_codes[k] = code
8889                 object.__setattr__(self, "_term_codes", term_codes)
8890                 return True
8891
8892         def _format_msg(self, msg):
8893                 return ">>> %s" % msg
8894
8895         def _erase(self):
8896                 self.out.write(
8897                         self._term_codes['carriage_return'] + \
8898                         self._term_codes['clr_eol'])
8899                 self.out.flush()
8900                 self._displayed = False
8901
8902         def _display(self, line):
8903                 self.out.write(line)
8904                 self.out.flush()
8905                 self._displayed = True
8906
8907         def _update(self, msg):
8908
8909                 out = self.out
8910                 if not self._isatty:
8911                         out.write(self._format_msg(msg) + self._term_codes['newline'])
8912                         self.out.flush()
8913                         self._displayed = True
8914                         return
8915
8916                 if self._displayed:
8917                         self._erase()
8918
8919                 self._display(self._format_msg(msg))
8920
8921         def displayMessage(self, msg):
8922
8923                 was_displayed = self._displayed
8924
8925                 if self._isatty and self._displayed:
8926                         self._erase()
8927
8928                 self.out.write(self._format_msg(msg) + self._term_codes['newline'])
8929                 self.out.flush()
8930                 self._displayed = False
8931
8932                 if was_displayed:
8933                         self._changed = True
8934                         self.display()
8935
8936         def reset(self):
8937                 self.maxval = 0
8938                 self.merges = 0
8939                 for name in self._bound_properties:
8940                         object.__setattr__(self, name, 0)
8941
8942                 if self._displayed:
8943                         self.out.write(self._term_codes['newline'])
8944                         self.out.flush()
8945                         self._displayed = False
8946
8947         def __setattr__(self, name, value):
8948                 old_value = getattr(self, name)
8949                 if value == old_value:
8950                         return
8951                 object.__setattr__(self, name, value)
8952                 if name in self._bound_properties:
8953                         self._property_change(name, old_value, value)
8954
8955         def _property_change(self, name, old_value, new_value):
8956                 self._changed = True
8957                 self.display()
8958
8959         def _load_avg_str(self):
8960                 try:
8961                         avg = os.getloadavg()
8962                 except (AttributeError, OSError), e:
8963                         return str(e)
8964
8965                 max_avg = max(avg)
8966
8967                 if max_avg < 10:
8968                         digits = 2
8969                 elif max_avg < 100:
8970                         digits = 1
8971                 else:
8972                         digits = 0
8973
8974                 return ", ".join(("%%.%df" % digits ) % x for x in avg)
8975
8976         def display(self):
8977                 """
8978                 Display status on stdout, but only if something has
8979                 changed since the last call.
8980                 """
8981
8982                 if self.quiet:
8983                         return
8984
8985                 current_time = time.time()
8986                 time_delta = current_time - self._last_display_time
8987                 if self._displayed and \
8988                         not self._changed:
8989                         if not self._isatty:
8990                                 return
8991                         if time_delta < self._min_display_latency:
8992                                 return
8993
8994                 self._last_display_time = current_time
8995                 self._changed = False
8996                 self._display_status()
8997
8998         def _display_status(self):
8999                 # Don't use len(self._completed_tasks) here since that also
9000                 # can include uninstall tasks.
9001                 curval_str = str(self.curval)
9002                 maxval_str = str(self.maxval)
9003                 running_str = str(self.running)
9004                 failed_str = str(self.failed)
9005                 load_avg_str = self._load_avg_str()
9006
9007                 color_output = StringIO.StringIO()
9008                 plain_output = StringIO.StringIO()
9009                 style_file = portage.output.ConsoleStyleFile(color_output)
9010                 style_file.write_listener = plain_output
9011                 style_writer = portage.output.StyleWriter(file=style_file, maxcol=9999)
9012                 style_writer.style_listener = style_file.new_styles
9013                 f = formatter.AbstractFormatter(style_writer)
9014
9015                 number_style = "INFORM"
9016                 f.add_literal_data("Jobs: ")
9017                 f.push_style(number_style)
9018                 f.add_literal_data(curval_str)
9019                 f.pop_style()
9020                 f.add_literal_data(" of ")
9021                 f.push_style(number_style)
9022                 f.add_literal_data(maxval_str)
9023                 f.pop_style()
9024                 f.add_literal_data(" complete")
9025
9026                 if self.running:
9027                         f.add_literal_data(", ")
9028                         f.push_style(number_style)
9029                         f.add_literal_data(running_str)
9030                         f.pop_style()
9031                         f.add_literal_data(" running")
9032
9033                 if self.failed:
9034                         f.add_literal_data(", ")
9035                         f.push_style(number_style)
9036                         f.add_literal_data(failed_str)
9037                         f.pop_style()
9038                         f.add_literal_data(" failed")
9039
9040                 padding = self._jobs_column_width - len(plain_output.getvalue())
9041                 if padding > 0:
9042                         f.add_literal_data(padding * " ")
9043
9044                 f.add_literal_data("Load avg: ")
9045                 f.add_literal_data(load_avg_str)
9046
9047                 # Truncate to fit width, to avoid making the terminal scroll if the
9048                 # line overflows (happens when the load average is large).
9049                 plain_output = plain_output.getvalue()
9050                 if self._isatty and len(plain_output) > self.width:
9051                         # Use plain_output here since it's easier to truncate
9052                         # properly than the color output which contains console
9053                         # color codes.
9054                         self._update(plain_output[:self.width])
9055                 else:
9056                         self._update(color_output.getvalue())
9057
9058                 xtermTitle(" ".join(plain_output.split()))
9059
9060 class Scheduler(PollScheduler):
9061
9062         _opts_ignore_blockers = \
9063                 frozenset(["--buildpkgonly",
9064                 "--fetchonly", "--fetch-all-uri",
9065                 "--nodeps", "--pretend"])
9066
9067         _opts_no_background = \
9068                 frozenset(["--pretend",
9069                 "--fetchonly", "--fetch-all-uri"])
9070
9071         _opts_no_restart = frozenset(["--buildpkgonly",
9072                 "--fetchonly", "--fetch-all-uri", "--pretend"])
9073
9074         _bad_resume_opts = set(["--ask", "--changelog",
9075                 "--resume", "--skipfirst"])
9076
9077         _fetch_log = "/var/log/emerge-fetch.log"
9078
9079         class _iface_class(SlotObject):
9080                 __slots__ = ("dblinkEbuildPhase", "dblinkDisplayMerge",
9081                         "dblinkElog", "fetch", "register", "schedule",
9082                         "scheduleSetup", "scheduleUnpack", "scheduleYield",
9083                         "unregister")
9084
9085         class _fetch_iface_class(SlotObject):
9086                 __slots__ = ("log_file", "schedule")
9087
9088         _task_queues_class = slot_dict_class(
9089                 ("merge", "jobs", "fetch", "unpack"), prefix="")
9090
9091         class _build_opts_class(SlotObject):
9092                 __slots__ = ("buildpkg", "buildpkgonly",
9093                         "fetch_all_uri", "fetchonly", "pretend")
9094
9095         class _binpkg_opts_class(SlotObject):
9096                 __slots__ = ("fetchonly", "getbinpkg", "pretend")
9097
9098         class _pkg_count_class(SlotObject):
9099                 __slots__ = ("curval", "maxval")
9100
9101         class _emerge_log_class(SlotObject):
9102                 __slots__ = ("xterm_titles",)
9103
9104                 def log(self, *pargs, **kwargs):
9105                         if not self.xterm_titles:
9106                                 # Avoid interference with the scheduler's status display.
9107                                 kwargs.pop("short_msg", None)
9108                         emergelog(self.xterm_titles, *pargs, **kwargs)
9109
9110         class _failed_pkg(SlotObject):
9111                 __slots__ = ("build_dir", "build_log", "pkg", "returncode")
9112
9113         class _ConfigPool(object):
9114                 """Interface for a task to temporarily allocate a config
9115                 instance from a pool. This allows a task to be constructed
9116                 long before the config instance actually becomes needed, like
9117                 when prefetchers are constructed for the whole merge list."""
9118                 __slots__ = ("_root", "_allocate", "_deallocate")
9119                 def __init__(self, root, allocate, deallocate):
9120                         self._root = root
9121                         self._allocate = allocate
9122                         self._deallocate = deallocate
9123                 def allocate(self):
9124                         return self._allocate(self._root)
9125                 def deallocate(self, settings):
9126                         self._deallocate(settings)
9127
9128         class _unknown_internal_error(portage.exception.PortageException):
9129                 """
9130                 Used internally to terminate scheduling. The specific reason for
9131                 the failure should have been dumped to stderr.
9132                 """
9133                 def __init__(self, value=""):
9134                         portage.exception.PortageException.__init__(self, value)
9135
9136         def __init__(self, settings, trees, mtimedb, myopts,
9137                 spinner, mergelist, favorites, digraph):
9138                 PollScheduler.__init__(self)
9139                 self.settings = settings
9140                 self.target_root = settings["ROOT"]
9141                 self.trees = trees
9142                 self.myopts = myopts
9143                 self._spinner = spinner
9144                 self._mtimedb = mtimedb
9145                 self._mergelist = mergelist
9146                 self._favorites = favorites
9147                 self._args_set = InternalPackageSet(favorites)
9148                 self._build_opts = self._build_opts_class()
9149                 for k in self._build_opts.__slots__:
9150                         setattr(self._build_opts, k, "--" + k.replace("_", "-") in myopts)
9151                 self._binpkg_opts = self._binpkg_opts_class()
9152                 for k in self._binpkg_opts.__slots__:
9153                         setattr(self._binpkg_opts, k, "--" + k.replace("_", "-") in myopts)
9154
9155                 self.curval = 0
9156                 self._logger = self._emerge_log_class()
9157                 self._task_queues = self._task_queues_class()
9158                 for k in self._task_queues.allowed_keys:
9159                         setattr(self._task_queues, k,
9160                                 SequentialTaskQueue())
9161                 self._status_display = JobStatusDisplay()
9162                 self._max_load = myopts.get("--load-average")
9163                 max_jobs = myopts.get("--jobs")
9164                 if max_jobs is None:
9165                         max_jobs = 1
9166                 self._set_max_jobs(max_jobs)
9167
9168                 # The root where the currently running
9169                 # portage instance is installed.
9170                 self._running_root = trees["/"]["root_config"]
9171                 self.edebug = 0
9172                 if settings.get("PORTAGE_DEBUG", "") == "1":
9173                         self.edebug = 1
9174                 self.pkgsettings = {}
9175                 self._config_pool = {}
9176                 self._blocker_db = {}
9177                 for root in trees:
9178                         self._config_pool[root] = []
9179                         self._blocker_db[root] = BlockerDB(trees[root]["root_config"])
9180
9181                 fetch_iface = self._fetch_iface_class(log_file=self._fetch_log,
9182                         schedule=self._schedule_fetch)
9183                 self._sched_iface = self._iface_class(
9184                         dblinkEbuildPhase=self._dblink_ebuild_phase,
9185                         dblinkDisplayMerge=self._dblink_display_merge,
9186                         dblinkElog=self._dblink_elog,
9187                         fetch=fetch_iface, register=self._register,
9188                         schedule=self._schedule_wait,
9189                         scheduleSetup=self._schedule_setup,
9190                         scheduleUnpack=self._schedule_unpack,
9191                         scheduleYield=self._schedule_yield,
9192                         unregister=self._unregister)
9193
9194                 self._prefetchers = weakref.WeakValueDictionary()
9195                 self._pkg_queue = []
9196                 self._completed_tasks = set()
9197
9198                 self._failed_pkgs = []
9199                 self._failed_pkgs_all = []
9200                 self._failed_pkgs_die_msgs = []
9201                 self._post_mod_echo_msgs = []
9202                 self._parallel_fetch = False
9203                 merge_count = len([x for x in mergelist \
9204                         if isinstance(x, Package) and x.operation == "merge"])
9205                 self._pkg_count = self._pkg_count_class(
9206                         curval=0, maxval=merge_count)
9207                 self._status_display.maxval = self._pkg_count.maxval
9208
9209                 # The load average takes some time to respond when new
9210                 # jobs are added, so we need to limit the rate of adding
9211                 # new jobs.
9212                 self._job_delay_max = 10
9213                 self._job_delay_factor = 1.0
9214                 self._job_delay_exp = 1.5
9215                 self._previous_job_start_time = None
9216
9217                 self._set_digraph(digraph)
9218
9219                 # This is used to memoize the _choose_pkg() result when
9220                 # no packages can be chosen until one of the existing
9221                 # jobs completes.
9222                 self._choose_pkg_return_early = False
9223
9224                 features = self.settings.features
9225                 if "parallel-fetch" in features and \
9226                         not ("--pretend" in self.myopts or \
9227                         "--fetch-all-uri" in self.myopts or \
9228                         "--fetchonly" in self.myopts):
9229                         if "distlocks" not in features:
9230                                 portage.writemsg(red("!!!")+"\n", noiselevel=-1)
9231                                 portage.writemsg(red("!!!")+" parallel-fetching " + \
9232                                         "requires the distlocks feature enabled"+"\n",
9233                                         noiselevel=-1)
9234                                 portage.writemsg(red("!!!")+" you have it disabled, " + \
9235                                         "thus parallel-fetching is being disabled"+"\n",
9236                                         noiselevel=-1)
9237                                 portage.writemsg(red("!!!")+"\n", noiselevel=-1)
9238                         elif len(mergelist) > 1:
9239                                 self._parallel_fetch = True
9240
9241                 if self._parallel_fetch:
9242                                 # clear out existing fetch log if it exists
9243                                 try:
9244                                         open(self._fetch_log, 'w')
9245                                 except EnvironmentError:
9246                                         pass
9247
9248                 self._running_portage = None
9249                 portage_match = self._running_root.trees["vartree"].dbapi.match(
9250                         portage.const.PORTAGE_PACKAGE_ATOM)
9251                 if portage_match:
9252                         cpv = portage_match.pop()
9253                         self._running_portage = self._pkg(cpv, "installed",
9254                                 self._running_root, installed=True)
9255
9256         def _poll(self, timeout=None):
9257                 self._schedule()
9258                 PollScheduler._poll(self, timeout=timeout)
9259
9260         def _set_max_jobs(self, max_jobs):
9261                 self._max_jobs = max_jobs
9262                 self._task_queues.jobs.max_jobs = max_jobs
9263
9264         def _background_mode(self):
9265                 """
9266                 Check if background mode is enabled and adjust states as necessary.
9267
9268                 @rtype: bool
9269                 @returns: True if background mode is enabled, False otherwise.
9270                 """
9271                 background = (self._max_jobs is True or \
9272                         self._max_jobs > 1 or "--quiet" in self.myopts) and \
9273                         not bool(self._opts_no_background.intersection(self.myopts))
9274
9275                 if background:
9276                         interactive_tasks = self._get_interactive_tasks()
9277                         if interactive_tasks:
9278                                 background = False
9279                                 writemsg_level(">>> Sending package output to stdio due " + \
9280                                         "to interactive package(s):\n",
9281                                         level=logging.INFO, noiselevel=-1)
9282                                 msg = [""]
9283                                 for pkg in interactive_tasks:
9284                                         pkg_str = "  " + colorize("INFORM", str(pkg.cpv))
9285                                         if pkg.root != "/":
9286                                                 pkg_str += " for " + pkg.root
9287                                         msg.append(pkg_str)
9288                                 msg.append("")
9289                                 writemsg_level("".join("%s\n" % (l,) for l in msg),
9290                                         level=logging.INFO, noiselevel=-1)
9291                                 if self._max_jobs is True or self._max_jobs > 1:
9292                                         self._set_max_jobs(1)
9293                                         writemsg_level(">>> Setting --jobs=1 due " + \
9294                                                 "to the above interactive package(s)\n",
9295                                                 level=logging.INFO, noiselevel=-1)
9296
9297                 self._status_display.quiet = \
9298                         not background or \
9299                         ("--quiet" in self.myopts and \
9300                         "--verbose" not in self.myopts)
9301
9302                 self._logger.xterm_titles = \
9303                         "notitles" not in self.settings.features and \
9304                         self._status_display.quiet
9305
9306                 return background
9307
9308         def _get_interactive_tasks(self):
9309                 from portage import flatten
9310                 from portage.dep import use_reduce, paren_reduce
9311                 interactive_tasks = []
9312                 for task in self._mergelist:
9313                         if not (isinstance(task, Package) and \
9314                                 task.operation == "merge"):
9315                                 continue
9316                         try:
9317                                 properties = flatten(use_reduce(paren_reduce(
9318                                         task.metadata["PROPERTIES"]), uselist=task.use.enabled))
9319                         except portage.exception.InvalidDependString, e:
9320                                 show_invalid_depstring_notice(task,
9321                                         task.metadata["PROPERTIES"], str(e))
9322                                 raise self._unknown_internal_error()
9323                         if "interactive" in properties:
9324                                 interactive_tasks.append(task)
9325                 return interactive_tasks
9326
9327         def _set_digraph(self, digraph):
9328                 if "--nodeps" in self.myopts or \
9329                         (self._max_jobs is not True and self._max_jobs < 2):
9330                         # save some memory
9331                         self._digraph = None
9332                         return
9333
9334                 self._digraph = digraph
9335                 self._prune_digraph()
9336
9337         def _prune_digraph(self):
9338                 """
9339                 Prune any root nodes that are irrelevant.
9340                 """
9341
9342                 graph = self._digraph
9343                 completed_tasks = self._completed_tasks
9344                 removed_nodes = set()
9345                 while True:
9346                         for node in graph.root_nodes():
9347                                 if not isinstance(node, Package) or \
9348                                         (node.installed and node.operation == "nomerge") or \
9349                                         node.onlydeps or \
9350                                         node in completed_tasks:
9351                                         removed_nodes.add(node)
9352                         if removed_nodes:
9353                                 graph.difference_update(removed_nodes)
9354                         if not removed_nodes:
9355                                 break
9356                         removed_nodes.clear()
9357
9358         class _pkg_failure(portage.exception.PortageException):
9359                 """
9360                 An instance of this class is raised by unmerge() when
9361                 an uninstallation fails.
9362                 """
9363                 status = 1
9364                 def __init__(self, *pargs):
9365                         portage.exception.PortageException.__init__(self, pargs)
9366                         if pargs:
9367                                 self.status = pargs[0]
9368
9369         def _schedule_fetch(self, fetcher):
9370                 """
9371                 Schedule a fetcher on the fetch queue, in order to
9372                 serialize access to the fetch log.
9373                 """
9374                 self._task_queues.fetch.addFront(fetcher)
9375
9376         def _schedule_setup(self, setup_phase):
9377                 """
9378                 Schedule a setup phase on the merge queue, in order to
9379                 serialize unsandboxed access to the live filesystem.
9380                 """
9381                 self._task_queues.merge.addFront(setup_phase)
9382                 self._schedule()
9383
9384         def _schedule_unpack(self, unpack_phase):
9385                 """
9386                 Schedule an unpack phase on the unpack queue, in order
9387                 to serialize $DISTDIR access for live ebuilds.
9388                 """
9389                 self._task_queues.unpack.add(unpack_phase)
9390
9391         def _find_blockers(self, new_pkg):
9392                 """
9393                 Returns a callable which should be called only when
9394                 the vdb lock has been acquired.
9395                 """
9396                 def get_blockers():
9397                         return self._find_blockers_with_lock(new_pkg, acquire_lock=0)
9398                 return get_blockers
9399
9400         def _find_blockers_with_lock(self, new_pkg, acquire_lock=0):
9401                 if self._opts_ignore_blockers.intersection(self.myopts):
9402                         return None
9403
9404                 # Call gc.collect() here to avoid heap overflow that
9405                 # triggers 'Cannot allocate memory' errors (reported
9406                 # with python-2.5).
9407                 import gc
9408                 gc.collect()
9409
9410                 blocker_db = self._blocker_db[new_pkg.root]
9411
9412                 blocker_dblinks = []
9413                 for blocking_pkg in blocker_db.findInstalledBlockers(
9414                         new_pkg, acquire_lock=acquire_lock):
9415                         if new_pkg.slot_atom == blocking_pkg.slot_atom:
9416                                 continue
9417                         if new_pkg.cpv == blocking_pkg.cpv:
9418                                 continue
9419                         blocker_dblinks.append(portage.dblink(
9420                                 blocking_pkg.category, blocking_pkg.pf, blocking_pkg.root,
9421                                 self.pkgsettings[blocking_pkg.root], treetype="vartree",
9422                                 vartree=self.trees[blocking_pkg.root]["vartree"]))
9423
9424                 gc.collect()
9425
9426                 return blocker_dblinks
9427
9428         def _dblink_pkg(self, pkg_dblink):
9429                 cpv = pkg_dblink.mycpv
9430                 type_name = RootConfig.tree_pkg_map[pkg_dblink.treetype]
9431                 root_config = self.trees[pkg_dblink.myroot]["root_config"]
9432                 installed = type_name == "installed"
9433                 return self._pkg(cpv, type_name, root_config, installed=installed)
9434
9435         def _append_to_log_path(self, log_path, msg):
9436                 f = open(log_path, 'a')
9437                 try:
9438                         f.write(msg)
9439                 finally:
9440                         f.close()
9441
9442         def _dblink_elog(self, pkg_dblink, phase, func, msgs):
9443
9444                 log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
9445                 log_file = None
9446                 out = sys.stdout
9447                 background = self._background
9448
9449                 if background and log_path is not None:
9450                         log_file = open(log_path, 'a')
9451                         out = log_file
9452
9453                 try:
9454                         for msg in msgs:
9455                                 func(msg, phase=phase, key=pkg_dblink.mycpv, out=out)
9456                 finally:
9457                         if log_file is not None:
9458                                 log_file.close()
9459
9460         def _dblink_display_merge(self, pkg_dblink, msg, level=0, noiselevel=0):
9461                 log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
9462                 background = self._background
9463
9464                 if log_path is None:
9465                         if not (background and level < logging.WARN):
9466                                 portage.util.writemsg_level(msg,
9467                                         level=level, noiselevel=noiselevel)
9468                 else:
9469                         if not background:
9470                                 portage.util.writemsg_level(msg,
9471                                         level=level, noiselevel=noiselevel)
9472                         self._append_to_log_path(log_path, msg)
9473
9474         def _dblink_ebuild_phase(self,
9475                 pkg_dblink, pkg_dbapi, ebuild_path, phase):
9476                 """
9477                 Using this callback for merge phases allows the scheduler
9478                 to run while these phases execute asynchronously, and allows
9479                 the scheduler control output handling.
9480                 """
9481
9482                 scheduler = self._sched_iface
9483                 settings = pkg_dblink.settings
9484                 pkg = self._dblink_pkg(pkg_dblink)
9485                 background = self._background
9486                 log_path = settings.get("PORTAGE_LOG_FILE")
9487
9488                 ebuild_phase = EbuildPhase(background=background,
9489                         pkg=pkg, phase=phase, scheduler=scheduler,
9490                         settings=settings, tree=pkg_dblink.treetype)
9491                 ebuild_phase.start()
9492                 ebuild_phase.wait()
9493
9494                 return ebuild_phase.returncode
9495
9496         def _check_manifests(self):
9497                 # Verify all the manifests now so that the user is notified of failure
9498                 # as soon as possible.
9499                 if "strict" not in self.settings.features or \
9500                         "--fetchonly" in self.myopts or \
9501                         "--fetch-all-uri" in self.myopts:
9502                         return os.EX_OK
9503
9504                 shown_verifying_msg = False
9505                 quiet_settings = {}
9506                 for myroot, pkgsettings in self.pkgsettings.iteritems():
9507                         quiet_config = portage.config(clone=pkgsettings)
9508                         quiet_config["PORTAGE_QUIET"] = "1"
9509                         quiet_config.backup_changes("PORTAGE_QUIET")
9510                         quiet_settings[myroot] = quiet_config
9511                         del quiet_config
9512
9513                 for x in self._mergelist:
9514                         if not isinstance(x, Package) or \
9515                                 x.type_name != "ebuild":
9516                                 continue
9517
9518                         if not shown_verifying_msg:
9519                                 shown_verifying_msg = True
9520                                 self._status_msg("Verifying ebuild manifests")
9521
9522                         root_config = x.root_config
9523                         portdb = root_config.trees["porttree"].dbapi
9524                         quiet_config = quiet_settings[root_config.root]
9525                         quiet_config["O"] = os.path.dirname(portdb.findname(x.cpv))
9526                         if not portage.digestcheck([], quiet_config, strict=True):
9527                                 return 1
9528
9529                 return os.EX_OK
9530
9531         def _add_prefetchers(self):
9532
9533                 if not self._parallel_fetch:
9534                         return
9535
9536                 if self._parallel_fetch:
9537                         self._status_msg("Starting parallel fetch")
9538
9539                         prefetchers = self._prefetchers
9540                         getbinpkg = "--getbinpkg" in self.myopts
9541
9542                         # In order to avoid "waiting for lock" messages
9543                         # at the beginning, which annoy users, never
9544                         # spawn a prefetcher for the first package.
9545                         for pkg in self._mergelist[1:]:
9546                                 prefetcher = self._create_prefetcher(pkg)
9547                                 if prefetcher is not None:
9548                                         self._task_queues.fetch.add(prefetcher)
9549                                         prefetchers[pkg] = prefetcher
9550
9551         def _create_prefetcher(self, pkg):
9552                 """
9553                 @return: a prefetcher, or None if not applicable
9554                 """
9555                 prefetcher = None
9556
9557                 if not isinstance(pkg, Package):
9558                         pass
9559
9560                 elif pkg.type_name == "ebuild":
9561
9562                         prefetcher = EbuildFetcher(background=True,
9563                                 config_pool=self._ConfigPool(pkg.root,
9564                                 self._allocate_config, self._deallocate_config),
9565                                 fetchonly=1, logfile=self._fetch_log,
9566                                 pkg=pkg, prefetch=True, scheduler=self._sched_iface)
9567
9568                 elif pkg.type_name == "binary" and \
9569                         "--getbinpkg" in self.myopts and \
9570                         pkg.root_config.trees["bintree"].isremote(pkg.cpv):
9571
9572                         prefetcher = BinpkgFetcher(background=True,
9573                                 logfile=self._fetch_log, pkg=pkg,
9574                                 scheduler=self._sched_iface)
9575
9576                 return prefetcher
9577
9578         def _is_restart_scheduled(self):
9579                 """
9580                 Check if the merge list contains a replacement
9581                 for the current running instance, that will result
9582                 in restart after merge.
9583                 @rtype: bool
9584                 @returns: True if a restart is scheduled, False otherwise.
9585                 """
9586                 if self._opts_no_restart.intersection(self.myopts):
9587                         return False
9588
9589                 mergelist = self._mergelist
9590
9591                 for i, pkg in enumerate(mergelist):
9592                         if self._is_restart_necessary(pkg) and \
9593                                 i != len(mergelist) - 1:
9594                                 return True
9595
9596                 return False
9597
9598         def _is_restart_necessary(self, pkg):
9599                 """
9600                 @return: True if merging the given package
9601                         requires restart, False otherwise.
9602                 """
9603
9604                 # Figure out if we need a restart.
9605                 if pkg.root == self._running_root.root and \
9606                         portage.match_from_list(
9607                         portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
9608                         if self._running_portage:
9609                                 return cmp(pkg, self._running_portage) != 0
9610                         return True
9611                 return False
9612
9613         def _restart_if_necessary(self, pkg):
9614                 """
9615                 Use execv() to restart emerge. This happens
9616                 if portage upgrades itself and there are
9617                 remaining packages in the list.
9618                 """
9619
9620                 if self._opts_no_restart.intersection(self.myopts):
9621                         return
9622
9623                 if not self._is_restart_necessary(pkg):
9624                         return
9625
9626                 if pkg == self._mergelist[-1]:
9627                         return
9628
9629                 self._main_loop_cleanup()
9630
9631                 logger = self._logger
9632                 pkg_count = self._pkg_count
9633                 mtimedb = self._mtimedb
9634                 bad_resume_opts = self._bad_resume_opts
9635
9636                 logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
9637                         (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
9638
9639                 logger.log(" *** RESTARTING " + \
9640                         "emerge via exec() after change of " + \
9641                         "portage version.")
9642
9643                 mtimedb["resume"]["mergelist"].remove(list(pkg))
9644                 mtimedb.commit()
9645                 portage.run_exitfuncs()
9646                 mynewargv = [sys.argv[0], "--resume"]
9647                 resume_opts = self.myopts.copy()
9648                 # For automatic resume, we need to prevent
9649                 # any of bad_resume_opts from leaking in
9650                 # via EMERGE_DEFAULT_OPTS.
9651                 resume_opts["--ignore-default-opts"] = True
9652                 for myopt, myarg in resume_opts.iteritems():
9653                         if myopt not in bad_resume_opts:
9654                                 if myarg is True:
9655                                         mynewargv.append(myopt)
9656                                 else:
9657                                         mynewargv.append(myopt +"="+ str(myarg))
9658                 # priority only needs to be adjusted on the first run
9659                 os.environ["PORTAGE_NICENESS"] = "0"
9660                 os.execv(mynewargv[0], mynewargv)
9661
9662         def merge(self):
9663
9664                 if "--resume" in self.myopts:
9665                         # We're resuming.
9666                         portage.writemsg_stdout(
9667                                 colorize("GOOD", "*** Resuming merge...\n"), noiselevel=-1)
9668                         self._logger.log(" *** Resuming merge...")
9669
9670                 self._save_resume_list()
9671
9672                 try:
9673                         self._background = self._background_mode()
9674                 except self._unknown_internal_error:
9675                         return 1
9676
9677                 for root in self.trees:
9678                         root_config = self.trees[root]["root_config"]
9679                         if self._background:
9680                                 root_config.settings.unlock()
9681                                 root_config.settings["PORTAGE_BACKGROUND"] = "1"
9682                                 root_config.settings.backup_changes("PORTAGE_BACKGROUND")
9683                                 root_config.settings.lock()
9684
9685                         self.pkgsettings[root] = portage.config(
9686                                 clone=root_config.settings)
9687
9688                 rval = self._check_manifests()
9689                 if rval != os.EX_OK:
9690                         return rval
9691
9692                 keep_going = "--keep-going" in self.myopts
9693                 fetchonly = self._build_opts.fetchonly
9694                 mtimedb = self._mtimedb
9695                 failed_pkgs = self._failed_pkgs
9696
9697                 while True:
9698                         rval = self._merge()
9699                         if rval == os.EX_OK or fetchonly or not keep_going:
9700                                 break
9701                         if "resume" not in mtimedb:
9702                                 break
9703                         mergelist = self._mtimedb["resume"].get("mergelist")
9704                         if not mergelist:
9705                                 break
9706
9707                         if not failed_pkgs:
9708                                 break
9709
9710                         for failed_pkg in failed_pkgs:
9711                                 mergelist.remove(list(failed_pkg.pkg))
9712
9713                         self._failed_pkgs_all.extend(failed_pkgs)
9714                         del failed_pkgs[:]
9715
9716                         if not mergelist:
9717                                 break
9718
9719                         if not self._calc_resume_list():
9720                                 break
9721
9722                         clear_caches(self.trees)
9723                         if not self._mergelist:
9724                                 break
9725
9726                         self._save_resume_list()
9727                         self._pkg_count.curval = 0
9728                         self._pkg_count.maxval = len([x for x in self._mergelist \
9729                                 if isinstance(x, Package) and x.operation == "merge"])
9730                         self._status_display.maxval = self._pkg_count.maxval
9731
9732                 self._logger.log(" *** Finished. Cleaning up...")
9733
9734                 if failed_pkgs:
9735                         self._failed_pkgs_all.extend(failed_pkgs)
9736                         del failed_pkgs[:]
9737
9738                 background = self._background
9739                 failure_log_shown = False
9740                 if background and len(self._failed_pkgs_all) == 1:
9741                         # If only one package failed then just show it's
9742                         # whole log for easy viewing.
9743                         failed_pkg = self._failed_pkgs_all[-1]
9744                         build_dir = failed_pkg.build_dir
9745                         log_file = None
9746
9747                         log_paths = [failed_pkg.build_log]
9748
9749                         log_path = self._locate_failure_log(failed_pkg)
9750                         if log_path is not None:
9751                                 try:
9752                                         log_file = open(log_path, 'rb')
9753                                 except IOError:
9754                                         pass
9755
9756                         if log_file is not None:
9757                                 try:
9758                                         for line in log_file:
9759                                                 writemsg_level(line, noiselevel=-1)
9760                                 finally:
9761                                         log_file.close()
9762                                 failure_log_shown = True
9763
9764                 # Dump mod_echo output now since it tends to flood the terminal.
9765                 # This allows us to avoid having more important output, generated
9766                 # later, from being swept away by the mod_echo output.
9767                 mod_echo_output =  _flush_elog_mod_echo()
9768
9769                 if background and not failure_log_shown and \
9770                         self._failed_pkgs_all and \
9771                         self._failed_pkgs_die_msgs and \
9772                         not mod_echo_output:
9773
9774                         printer = portage.output.EOutput()
9775                         for mysettings, key, logentries in self._failed_pkgs_die_msgs:
9776                                 root_msg = ""
9777                                 if mysettings["ROOT"] != "/":
9778                                         root_msg = " merged to %s" % mysettings["ROOT"]
9779                                 print
9780                                 printer.einfo("Error messages for package %s%s:" % \
9781                                         (colorize("INFORM", key), root_msg))
9782                                 print
9783                                 for phase in portage.const.EBUILD_PHASES:
9784                                         if phase not in logentries:
9785                                                 continue
9786                                         for msgtype, msgcontent in logentries[phase]:
9787                                                 if isinstance(msgcontent, basestring):
9788                                                         msgcontent = [msgcontent]
9789                                                 for line in msgcontent:
9790                                                         printer.eerror(line.strip("\n"))
9791
9792                 if self._post_mod_echo_msgs:
9793                         for msg in self._post_mod_echo_msgs:
9794                                 msg()
9795
9796                 if len(self._failed_pkgs_all) > 1:
9797                         msg = "The following packages have " + \
9798                                 "failed to build or install:"
9799                         prefix = bad(" * ")
9800                         writemsg(prefix + "\n", noiselevel=-1)
9801                         from textwrap import wrap
9802                         for line in wrap(msg, 72):
9803                                 writemsg("%s%s\n" % (prefix, line), noiselevel=-1)
9804                         writemsg(prefix + "\n", noiselevel=-1)
9805                         for failed_pkg in self._failed_pkgs_all:
9806                                 writemsg("%s\t%s\n" % (prefix,
9807                                         colorize("INFORM", str(failed_pkg.pkg))),
9808                                         noiselevel=-1)
9809                         writemsg(prefix + "\n", noiselevel=-1)
9810
9811                 return rval
9812
9813         def _elog_listener(self, mysettings, key, logentries, fulltext):
9814                 errors = portage.elog.filter_loglevels(logentries, ["ERROR"])
9815                 if errors:
9816                         self._failed_pkgs_die_msgs.append(
9817                                 (mysettings, key, errors))
9818
9819         def _locate_failure_log(self, failed_pkg):
9820
9821                 build_dir = failed_pkg.build_dir
9822                 log_file = None
9823
9824                 log_paths = [failed_pkg.build_log]
9825
9826                 for log_path in log_paths:
9827                         if not log_path:
9828                                 continue
9829
9830                         try:
9831                                 log_size = os.stat(log_path).st_size
9832                         except OSError:
9833                                 continue
9834
9835                         if log_size == 0:
9836                                 continue
9837
9838                         return log_path
9839
9840                 return None
9841
9842         def _add_packages(self):
9843                 pkg_queue = self._pkg_queue
9844                 for pkg in self._mergelist:
9845                         if isinstance(pkg, Package):
9846                                 pkg_queue.append(pkg)
9847                         elif isinstance(pkg, Blocker):
9848                                 pass
9849
9850         def _merge_exit(self, merge):
9851                 self._do_merge_exit(merge)
9852                 self._deallocate_config(merge.merge.settings)
9853                 if merge.returncode == os.EX_OK and \
9854                         not merge.merge.pkg.installed:
9855                         self._status_display.curval += 1
9856                 self._status_display.merges = len(self._task_queues.merge)
9857                 self._schedule()
9858
9859         def _do_merge_exit(self, merge):
9860                 pkg = merge.merge.pkg
9861                 if merge.returncode != os.EX_OK:
9862                         settings = merge.merge.settings
9863                         build_dir = settings.get("PORTAGE_BUILDDIR")
9864                         build_log = settings.get("PORTAGE_LOG_FILE")
9865
9866                         self._failed_pkgs.append(self._failed_pkg(
9867                                 build_dir=build_dir, build_log=build_log,
9868                                 pkg=pkg,
9869                                 returncode=merge.returncode))
9870                         self._failed_pkg_msg(self._failed_pkgs[-1], "install", "to")
9871
9872                         self._status_display.failed = len(self._failed_pkgs)
9873                         return
9874
9875                 self._task_complete(pkg)
9876                 pkg_to_replace = merge.merge.pkg_to_replace
9877                 if pkg_to_replace is not None:
9878                         # When a package is replaced, mark it's uninstall
9879                         # task complete (if any).
9880                         uninst_hash_key = \
9881                                 ("installed", pkg.root, pkg_to_replace.cpv, "uninstall")
9882                         self._task_complete(uninst_hash_key)
9883
9884                 if pkg.installed:
9885                         return
9886
9887                 self._restart_if_necessary(pkg)
9888
9889                 # Call mtimedb.commit() after each merge so that
9890                 # --resume still works after being interrupted
9891                 # by reboot, sigkill or similar.
9892                 mtimedb = self._mtimedb
9893                 mtimedb["resume"]["mergelist"].remove(list(pkg))
9894                 if not mtimedb["resume"]["mergelist"]:
9895                         del mtimedb["resume"]
9896                 mtimedb.commit()
9897
9898         def _build_exit(self, build):
9899                 if build.returncode == os.EX_OK:
9900                         self.curval += 1
9901                         merge = PackageMerge(merge=build)
9902                         merge.addExitListener(self._merge_exit)
9903                         self._task_queues.merge.add(merge)
9904                         self._status_display.merges = len(self._task_queues.merge)
9905                 else:
9906                         settings = build.settings
9907                         build_dir = settings.get("PORTAGE_BUILDDIR")
9908                         build_log = settings.get("PORTAGE_LOG_FILE")
9909
9910                         self._failed_pkgs.append(self._failed_pkg(
9911                                 build_dir=build_dir, build_log=build_log,
9912                                 pkg=build.pkg,
9913                                 returncode=build.returncode))
9914                         self._failed_pkg_msg(self._failed_pkgs[-1], "emerge", "for")
9915
9916                         self._status_display.failed = len(self._failed_pkgs)
9917                         self._deallocate_config(build.settings)
9918                 self._jobs -= 1
9919                 self._status_display.running = self._jobs
9920                 self._schedule()
9921
9922         def _extract_exit(self, build):
9923                 self._build_exit(build)
9924
9925         def _task_complete(self, pkg):
9926                 self._completed_tasks.add(pkg)
9927                 self._choose_pkg_return_early = False
9928
9929         def _merge(self):
9930
9931                 self._add_prefetchers()
9932                 self._add_packages()
9933                 pkg_queue = self._pkg_queue
9934                 failed_pkgs = self._failed_pkgs
9935                 portage.locks._quiet = self._background
9936                 portage.elog._emerge_elog_listener = self._elog_listener
9937                 rval = os.EX_OK
9938
9939                 try:
9940                         self._main_loop()
9941                 finally:
9942                         self._main_loop_cleanup()
9943                         portage.locks._quiet = False
9944                         portage.elog._emerge_elog_listener = None
9945                         if failed_pkgs:
9946                                 rval = failed_pkgs[-1].returncode
9947
9948                 return rval
9949
9950         def _main_loop_cleanup(self):
9951                 del self._pkg_queue[:]
9952                 self._completed_tasks.clear()
9953                 self._choose_pkg_return_early = False
9954                 self._status_display.reset()
9955                 self._digraph = None
9956                 self._task_queues.fetch.clear()
9957
9958         def _choose_pkg(self):
9959                 """
9960                 Choose a task that has all it's dependencies satisfied.
9961                 """
9962
9963                 if self._choose_pkg_return_early:
9964                         return None
9965
9966                 if self._digraph is None:
9967                         if (self._jobs or self._task_queues.merge) and \
9968                                 not ("--nodeps" in self.myopts and \
9969                                 (self._max_jobs is True or self._max_jobs > 1)):
9970                                 self._choose_pkg_return_early = True
9971                                 return None
9972                         return self._pkg_queue.pop(0)
9973
9974                 if not (self._jobs or self._task_queues.merge):
9975                         return self._pkg_queue.pop(0)
9976
9977                 self._prune_digraph()
9978
9979                 chosen_pkg = None
9980                 later = set(self._pkg_queue)
9981                 for pkg in self._pkg_queue:
9982                         later.remove(pkg)
9983                         if not self._dependent_on_scheduled_merges(pkg, later):
9984                                 chosen_pkg = pkg
9985                                 break
9986
9987                 if chosen_pkg is not None:
9988                         self._pkg_queue.remove(chosen_pkg)
9989
9990                 if chosen_pkg is None:
9991                         # There's no point in searching for a package to
9992                         # choose until at least one of the existing jobs
9993                         # completes.
9994                         self._choose_pkg_return_early = True
9995
9996                 return chosen_pkg
9997
9998         def _dependent_on_scheduled_merges(self, pkg, later):
9999                 """
10000                 Traverse the subgraph of the given packages deep dependencies
10001                 to see if it contains any scheduled merges.
10002                 @param pkg: a package to check dependencies for
10003                 @type pkg: Package
10004                 @param later: packages for which dependence should be ignored
10005                         since they will be merged later than pkg anyway and therefore
10006                         delaying the merge of pkg will not result in a more optimal
10007                         merge order
10008                 @type later: set
10009                 @rtype: bool
10010                 @returns: True if the package is dependent, False otherwise.
10011                 """
10012
10013                 graph = self._digraph
10014                 completed_tasks = self._completed_tasks
10015
10016                 dependent = False
10017                 traversed_nodes = set([pkg])
10018                 direct_deps = graph.child_nodes(pkg)
10019                 node_stack = direct_deps
10020                 direct_deps = frozenset(direct_deps)
10021                 while node_stack:
10022                         node = node_stack.pop()
10023                         if node in traversed_nodes:
10024                                 continue
10025                         traversed_nodes.add(node)
10026                         if not ((node.installed and node.operation == "nomerge") or \
10027                                 (node.operation == "uninstall" and \
10028                                 node not in direct_deps) or \
10029                                 node in completed_tasks or \
10030                                 node in later):
10031                                 dependent = True
10032                                 break
10033                         node_stack.extend(graph.child_nodes(node))
10034
10035                 return dependent
10036
10037         def _allocate_config(self, root):
10038                 """
10039                 Allocate a unique config instance for a task in order
10040                 to prevent interference between parallel tasks.
10041                 """
10042                 if self._config_pool[root]:
10043                         temp_settings = self._config_pool[root].pop()
10044                 else:
10045                         temp_settings = portage.config(clone=self.pkgsettings[root])
10046                 # Since config.setcpv() isn't guaranteed to call config.reset() due to
10047                 # performance reasons, call it here to make sure all settings from the
10048                 # previous package get flushed out (such as PORTAGE_LOG_FILE).
10049                 temp_settings.reload()
10050                 temp_settings.reset()
10051                 return temp_settings
10052
10053         def _deallocate_config(self, settings):
10054                 self._config_pool[settings["ROOT"]].append(settings)
10055
10056         def _main_loop(self):
10057
10058                 # Only allow 1 job max if a restart is scheduled
10059                 # due to portage update.
10060                 if self._is_restart_scheduled() or \
10061                         self._opts_no_background.intersection(self.myopts):
10062                         self._set_max_jobs(1)
10063
10064                 merge_queue = self._task_queues.merge
10065
10066                 while self._schedule():
10067                         if self._poll_event_handlers:
10068                                 self._poll_loop()
10069
10070                 while True:
10071                         self._schedule()
10072                         if not (self._jobs or merge_queue):
10073                                 break
10074                         if self._poll_event_handlers:
10075                                 self._poll_loop()
10076
10077         def _keep_scheduling(self):
10078                 return bool(self._pkg_queue and \
10079                         not (self._failed_pkgs and not self._build_opts.fetchonly))
10080
10081         def _schedule_tasks(self):
10082                 self._schedule_tasks_imp()
10083                 self._status_display.display()
10084
10085                 state_change = 0
10086                 for q in self._task_queues.values():
10087                         if q.schedule():
10088                                 state_change += 1
10089
10090                 # Cancel prefetchers if they're the only reason
10091                 # the main poll loop is still running.
10092                 if self._failed_pkgs and not self._build_opts.fetchonly and \
10093                         not (self._jobs or self._task_queues.merge) and \
10094                         self._task_queues.fetch:
10095                         self._task_queues.fetch.clear()
10096                         state_change += 1
10097
10098                 if state_change:
10099                         self._schedule_tasks_imp()
10100                         self._status_display.display()
10101
10102                 return self._keep_scheduling()
10103
10104         def _job_delay(self):
10105                 """
10106                 @rtype: bool
10107                 @returns: True if job scheduling should be delayed, False otherwise.
10108                 """
10109
10110                 if self._jobs and self._max_load is not None:
10111
10112                         current_time = time.time()
10113
10114                         delay = self._job_delay_factor * self._jobs ** self._job_delay_exp
10115                         if delay > self._job_delay_max:
10116                                 delay = self._job_delay_max
10117                         if (current_time - self._previous_job_start_time) < delay:
10118                                 return True
10119
10120                 return False
10121
10122         def _schedule_tasks_imp(self):
10123                 """
10124                 @rtype: bool
10125                 @returns: True if state changed, False otherwise.
10126                 """
10127
10128                 state_change = 0
10129
10130                 while True:
10131
10132                         if not self._keep_scheduling():
10133                                 return bool(state_change)
10134
10135                         if self._choose_pkg_return_early or \
10136                                 not self._can_add_job() or \
10137                                 self._job_delay():
10138                                 return bool(state_change)
10139
10140                         pkg = self._choose_pkg()
10141                         if pkg is None:
10142                                 return bool(state_change)
10143
10144                         state_change += 1
10145
10146                         if not pkg.installed:
10147                                 self._pkg_count.curval += 1
10148
10149                         task = self._task(pkg)
10150
10151                         if pkg.installed:
10152                                 merge = PackageMerge(merge=task)
10153                                 merge.addExitListener(self._merge_exit)
10154                                 self._task_queues.merge.add(merge)
10155
10156                         elif pkg.built:
10157                                 self._jobs += 1
10158                                 self._previous_job_start_time = time.time()
10159                                 self._status_display.running = self._jobs
10160                                 task.addExitListener(self._extract_exit)
10161                                 self._task_queues.jobs.add(task)
10162
10163                         else:
10164                                 self._jobs += 1
10165                                 self._previous_job_start_time = time.time()
10166                                 self._status_display.running = self._jobs
10167                                 task.addExitListener(self._build_exit)
10168                                 self._task_queues.jobs.add(task)
10169
10170                 return bool(state_change)
10171
10172         def _task(self, pkg):
10173
10174                 pkg_to_replace = None
10175                 if pkg.operation != "uninstall":
10176                         vardb = pkg.root_config.trees["vartree"].dbapi
10177                         previous_cpv = vardb.match(pkg.slot_atom)
10178                         if previous_cpv:
10179                                 previous_cpv = previous_cpv.pop()
10180                                 pkg_to_replace = self._pkg(previous_cpv,
10181                                         "installed", pkg.root_config, installed=True)
10182
10183                 task = MergeListItem(args_set=self._args_set,
10184                         background=self._background, binpkg_opts=self._binpkg_opts,
10185                         build_opts=self._build_opts,
10186                         config_pool=self._ConfigPool(pkg.root,
10187                         self._allocate_config, self._deallocate_config),
10188                         emerge_opts=self.myopts,
10189                         find_blockers=self._find_blockers(pkg), logger=self._logger,
10190                         mtimedb=self._mtimedb, pkg=pkg, pkg_count=self._pkg_count.copy(),
10191                         pkg_to_replace=pkg_to_replace,
10192                         prefetcher=self._prefetchers.get(pkg),
10193                         scheduler=self._sched_iface,
10194                         settings=self._allocate_config(pkg.root),
10195                         statusMessage=self._status_msg,
10196                         world_atom=self._world_atom)
10197
10198                 return task
10199
10200         def _failed_pkg_msg(self, failed_pkg, action, preposition):
10201                 pkg = failed_pkg.pkg
10202                 msg = "%s to %s %s" % \
10203                         (bad("Failed"), action, colorize("INFORM", pkg.cpv))
10204                 if pkg.root != "/":
10205                         msg += " %s %s" % (preposition, pkg.root)
10206
10207                 log_path = self._locate_failure_log(failed_pkg)
10208                 if log_path is not None:
10209                         msg += ", Log file:"
10210                 self._status_msg(msg)
10211
10212                 if log_path is not None:
10213                         self._status_msg(" '%s'" % (colorize("INFORM", log_path),))
10214
10215         def _status_msg(self, msg):
10216                 """
10217                 Display a brief status message (no newlines) in the status display.
10218                 This is called by tasks to provide feedback to the user. This
10219                 delegates the resposibility of generating \r and \n control characters,
10220                 to guarantee that lines are created or erased when necessary and
10221                 appropriate.
10222
10223                 @type msg: str
10224                 @param msg: a brief status message (no newlines allowed)
10225                 """
10226                 if not self._background:
10227                         writemsg_level("\n")
10228                 self._status_display.displayMessage(msg)
10229
10230         def _save_resume_list(self):
10231                 """
10232                 Do this before verifying the ebuild Manifests since it might
10233                 be possible for the user to use --resume --skipfirst get past
10234                 a non-essential package with a broken digest.
10235                 """
10236                 mtimedb = self._mtimedb
10237                 mtimedb["resume"]["mergelist"] = [list(x) \
10238                         for x in self._mergelist \
10239                         if isinstance(x, Package) and x.operation == "merge"]
10240
10241                 mtimedb.commit()
10242
10243         def _calc_resume_list(self):
10244                 """
10245                 Use the current resume list to calculate a new one,
10246                 dropping any packages with unsatisfied deps.
10247                 @rtype: bool
10248                 @returns: True if successful, False otherwise.
10249                 """
10250                 print colorize("GOOD", "*** Resuming merge...")
10251
10252                 if self._show_list():
10253                         if "--tree" in self.myopts:
10254                                 portage.writemsg_stdout("\n" + \
10255                                         darkgreen("These are the packages that " + \
10256                                         "would be merged, in reverse order:\n\n"))
10257
10258                         else:
10259                                 portage.writemsg_stdout("\n" + \
10260                                         darkgreen("These are the packages that " + \
10261                                         "would be merged, in order:\n\n"))
10262
10263                 show_spinner = "--quiet" not in self.myopts and \
10264                         "--nodeps" not in self.myopts
10265
10266                 if show_spinner:
10267                         print "Calculating dependencies  ",
10268
10269                 myparams = create_depgraph_params(self.myopts, None)
10270                 success = False
10271                 e = None
10272                 try:
10273                         success, mydepgraph, dropped_tasks = resume_depgraph(
10274                                 self.settings, self.trees, self._mtimedb, self.myopts,
10275                                 myparams, self._spinner, skip_unsatisfied=True)
10276                 except depgraph.UnsatisfiedResumeDep, e:
10277                         mydepgraph = e.depgraph
10278                         dropped_tasks = set()
10279
10280                 if show_spinner:
10281                         print "\b\b... done!"
10282
10283                 if e is not None:
10284                         def unsatisfied_resume_dep_msg():
10285                                 mydepgraph.display_problems()
10286                                 out = portage.output.EOutput()
10287                                 out.eerror("One or more packages are either masked or " + \
10288                                         "have missing dependencies:")
10289                                 out.eerror("")
10290                                 indent = "  "
10291                                 show_parents = set()
10292                                 for dep in e.value:
10293                                         if dep.parent in show_parents:
10294                                                 continue
10295                                         show_parents.add(dep.parent)
10296                                         if dep.atom is None:
10297                                                 out.eerror(indent + "Masked package:")
10298                                                 out.eerror(2 * indent + str(dep.parent))
10299                                                 out.eerror("")
10300                                         else:
10301                                                 out.eerror(indent + str(dep.atom) + " pulled in by:")
10302                                                 out.eerror(2 * indent + str(dep.parent))
10303                                                 out.eerror("")
10304                                 msg = "The resume list contains packages " + \
10305                                         "that are either masked or have " + \
10306                                         "unsatisfied dependencies. " + \
10307                                         "Please restart/continue " + \
10308                                         "the operation manually, or use --skipfirst " + \
10309                                         "to skip the first package in the list and " + \
10310                                         "any other packages that may be " + \
10311                                         "masked or have missing dependencies."
10312                                 for line in textwrap.wrap(msg, 72):
10313                                         out.eerror(line)
10314                         self._post_mod_echo_msgs.append(unsatisfied_resume_dep_msg)
10315                         return False
10316
10317                 if success and self._show_list():
10318                         mylist = mydepgraph.altlist()
10319                         if mylist:
10320                                 if "--tree" in self.myopts:
10321                                         mylist.reverse()
10322                                 mydepgraph.display(mylist, favorites=self._favorites)
10323
10324                 if not success:
10325                         self._post_mod_echo_msgs.append(mydepgraph.display_problems)
10326                         return False
10327                 mydepgraph.display_problems()
10328
10329                 mylist = mydepgraph.altlist()
10330                 mydepgraph.break_refs(mylist)
10331                 self._mergelist = mylist
10332                 self._set_digraph(mydepgraph.schedulerGraph())
10333
10334                 msg_width = 75
10335                 for task in dropped_tasks:
10336                         if not (isinstance(task, Package) and task.operation == "merge"):
10337                                 continue
10338                         pkg = task
10339                         msg = "emerge --keep-going:" + \
10340                                 " %s" % (pkg.cpv,)
10341                         if pkg.root != "/":
10342                                 msg += " for %s" % (pkg.root,)
10343                         msg += " dropped due to unsatisfied dependency."
10344                         for line in textwrap.wrap(msg, msg_width):
10345                                 eerror(line, phase="other", key=pkg.cpv)
10346                         settings = self.pkgsettings[pkg.root]
10347                         # Ensure that log collection from $T is disabled inside
10348                         # elog_process(), since any logs that might exist are
10349                         # not valid here.
10350                         settings.pop("T", None)
10351                         portage.elog.elog_process(pkg.cpv, settings)
10352
10353                 return True
10354
10355         def _show_list(self):
10356                 myopts = self.myopts
10357                 if "--quiet" not in myopts and \
10358                         ("--ask" in myopts or "--tree" in myopts or \
10359                         "--verbose" in myopts):
10360                         return True
10361                 return False
10362
10363         def _world_atom(self, pkg):
10364                 """
10365                 Add the package to the world file, but only if
10366                 it's supposed to be added. Otherwise, do nothing.
10367                 """
10368
10369                 if set(("--buildpkgonly", "--fetchonly",
10370                         "--fetch-all-uri",
10371                         "--oneshot", "--onlydeps",
10372                         "--pretend")).intersection(self.myopts):
10373                         return
10374
10375                 if pkg.root != self.target_root:
10376                         return
10377
10378                 args_set = self._args_set
10379                 if not args_set.findAtomForPackage(pkg):
10380                         return
10381
10382                 logger = self._logger
10383                 pkg_count = self._pkg_count
10384                 root_config = pkg.root_config
10385                 world_set = root_config.sets["world"]
10386                 world_locked = False
10387                 if hasattr(world_set, "lock"):
10388                         world_set.lock()
10389                         world_locked = True
10390
10391                 try:
10392                         if hasattr(world_set, "load"):
10393                                 world_set.load() # maybe it's changed on disk
10394
10395                         atom = create_world_atom(pkg, args_set, root_config)
10396                         if atom:
10397                                 if hasattr(world_set, "add"):
10398                                         self._status_msg(('Recording %s in "world" ' + \
10399                                                 'favorites file...') % atom)
10400                                         logger.log(" === (%s of %s) Updating world file (%s)" % \
10401                                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv))
10402                                         world_set.add(atom)
10403                                 else:
10404                                         writemsg_level('\n!!! Unable to record %s in "world"\n' % \
10405                                                 (atom,), level=logging.WARN, noiselevel=-1)
10406                 finally:
10407                         if world_locked:
10408                                 world_set.unlock()
10409
10410         def _pkg(self, cpv, type_name, root_config, installed=False):
10411                 """
10412                 Get a package instance from the cache, or create a new
10413                 one if necessary. Raises KeyError from aux_get if it
10414                 failures for some reason (package does not exist or is
10415                 corrupt).
10416                 """
10417                 operation = "merge"
10418                 if installed:
10419                         operation = "nomerge"
10420
10421                 if self._digraph is not None:
10422                         # Reuse existing instance when available.
10423                         pkg = self._digraph.get(
10424                                 (type_name, root_config.root, cpv, operation))
10425                         if pkg is not None:
10426                                 return pkg
10427
10428                 tree_type = depgraph.pkg_tree_map[type_name]
10429                 db = root_config.trees[tree_type].dbapi
10430                 db_keys = list(self.trees[root_config.root][
10431                         tree_type].dbapi._aux_cache_keys)
10432                 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
10433                 pkg = Package(cpv=cpv, metadata=metadata,
10434                         root_config=root_config, installed=installed)
10435                 if type_name == "ebuild":
10436                         settings = self.pkgsettings[root_config.root]
10437                         settings.setcpv(pkg)
10438                         pkg.metadata["USE"] = settings["PORTAGE_USE"]
10439
10440                 return pkg
10441
10442 class MetadataRegen(PollScheduler):
10443
10444         def __init__(self, portdb, max_jobs=None, max_load=None):
10445                 PollScheduler.__init__(self)
10446                 self._portdb = portdb
10447
10448                 if max_jobs is None:
10449                         max_jobs = 1
10450
10451                 self._max_jobs = max_jobs
10452                 self._max_load = max_load
10453                 self._sched_iface = self._sched_iface_class(
10454                         register=self._register,
10455                         schedule=self._schedule_wait,
10456                         unregister=self._unregister)
10457
10458                 self._valid_pkgs = set()
10459                 self._process_iter = self._iter_metadata_processes()
10460
10461         def _iter_metadata_processes(self):
10462                 portdb = self._portdb
10463                 valid_pkgs = self._valid_pkgs
10464                 every_cp = portdb.cp_all()
10465                 every_cp.sort(reverse=True)
10466
10467                 while every_cp:
10468                         cp = every_cp.pop()
10469                         portage.writemsg_stdout("Processing %s\n" % cp)
10470                         cpv_list = portdb.cp_list(cp)
10471                         for cpv in cpv_list:
10472                                 valid_pkgs.add(cpv)
10473                                 ebuild_path, repo_path = portdb.findname2(cpv)
10474                                 metadata_process = portdb._metadata_process(
10475                                         cpv, ebuild_path, repo_path)
10476                                 if metadata_process is None:
10477                                         continue
10478                                 yield metadata_process
10479
10480         def run(self):
10481
10482                 portdb = self._portdb
10483                 from portage.cache.cache_errors import CacheError
10484                 dead_nodes = {}
10485
10486                 for mytree in portdb.porttrees:
10487                         try:
10488                                 dead_nodes[mytree] = set(portdb.auxdb[mytree].iterkeys())
10489                         except CacheError, e:
10490                                 portage.writemsg("Error listing cache entries for " + \
10491                                         "'%s': %s, continuing...\n" % (mytree, e), noiselevel=-1)
10492                                 del e
10493                                 dead_nodes = None
10494                                 break
10495
10496                 while self._schedule():
10497                         self._poll_loop()
10498
10499                 while self._jobs:
10500                         self._poll_loop()
10501
10502                 if dead_nodes:
10503                         for y in self._valid_pkgs:
10504                                 for mytree in portdb.porttrees:
10505                                         if portdb.findname2(y, mytree=mytree)[0]:
10506                                                 dead_nodes[mytree].discard(y)
10507
10508                         for mytree, nodes in dead_nodes.iteritems():
10509                                 auxdb = portdb.auxdb[mytree]
10510                                 for y in nodes:
10511                                         try:
10512                                                 del auxdb[y]
10513                                         except (KeyError, CacheError):
10514                                                 pass
10515
10516         def _schedule_tasks(self):
10517                 """
10518                 @rtype: bool
10519                 @returns: True if there may be remaining tasks to schedule,
10520                         False otherwise.
10521                 """
10522                 while self._can_add_job():
10523                         try:
10524                                 metadata_process = self._process_iter.next()
10525                         except StopIteration:
10526                                 return False
10527
10528                         self._jobs += 1
10529                         metadata_process.scheduler = self._sched_iface
10530                         metadata_process.addExitListener(self._metadata_exit)
10531                         metadata_process.start()
10532                 return True
10533
10534         def _metadata_exit(self, metadata_process):
10535                 self._jobs -= 1
10536                 if metadata_process.returncode != os.EX_OK:
10537                         self._valid_pkgs.discard(metadata_process.cpv)
10538                         portage.writemsg("Error processing %s, continuing...\n" % \
10539                                 (metadata_process.cpv,))
10540                 self._schedule()
10541
10542 class UninstallFailure(portage.exception.PortageException):
10543         """
10544         An instance of this class is raised by unmerge() when
10545         an uninstallation fails.
10546         """
10547         status = 1
10548         def __init__(self, *pargs):
10549                 portage.exception.PortageException.__init__(self, pargs)
10550                 if pargs:
10551                         self.status = pargs[0]
10552
10553 def unmerge(root_config, myopts, unmerge_action,
10554         unmerge_files, ldpath_mtimes, autoclean=0,
10555         clean_world=1, clean_delay=1, ordered=0, raise_on_error=0,
10556         scheduler=None, writemsg_level=portage.util.writemsg_level):
10557
10558         quiet = "--quiet" in myopts
10559         settings = root_config.settings
10560         sets = root_config.sets
10561         vartree = root_config.trees["vartree"]
10562         candidate_catpkgs=[]
10563         global_unmerge=0
10564         xterm_titles = "notitles" not in settings.features
10565         out = portage.output.EOutput()
10566         pkg_cache = {}
10567         db_keys = list(vartree.dbapi._aux_cache_keys)
10568
10569         def _pkg(cpv):
10570                 pkg = pkg_cache.get(cpv)
10571                 if pkg is None:
10572                         pkg = Package(cpv=cpv, installed=True,
10573                                 metadata=izip(db_keys, vartree.dbapi.aux_get(cpv, db_keys)),
10574                                 root_config=root_config,
10575                                 type_name="installed")
10576                         pkg_cache[cpv] = pkg
10577                 return pkg
10578
10579         vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
10580         try:
10581                 # At least the parent needs to exist for the lock file.
10582                 portage.util.ensure_dirs(vdb_path)
10583         except portage.exception.PortageException:
10584                 pass
10585         vdb_lock = None
10586         try:
10587                 if os.access(vdb_path, os.W_OK):
10588                         vdb_lock = portage.locks.lockdir(vdb_path)
10589                 realsyslist = sets["system"].getAtoms()
10590                 syslist = []
10591                 for x in realsyslist:
10592                         mycp = portage.dep_getkey(x)
10593                         if mycp in settings.getvirtuals():
10594                                 providers = []
10595                                 for provider in settings.getvirtuals()[mycp]:
10596                                         if vartree.dbapi.match(provider):
10597                                                 providers.append(provider)
10598                                 if len(providers) == 1:
10599                                         syslist.extend(providers)
10600                         else:
10601                                 syslist.append(mycp)
10602         
10603                 mysettings = portage.config(clone=settings)
10604         
10605                 if not unmerge_files:
10606                         if unmerge_action == "unmerge":
10607                                 print
10608                                 print bold("emerge unmerge") + " can only be used with specific package names"
10609                                 print
10610                                 return 0
10611                         else:
10612                                 global_unmerge = 1
10613         
10614                 localtree = vartree
10615                 # process all arguments and add all
10616                 # valid db entries to candidate_catpkgs
10617                 if global_unmerge:
10618                         if not unmerge_files:
10619                                 candidate_catpkgs.extend(vartree.dbapi.cp_all())
10620                 else:
10621                         #we've got command-line arguments
10622                         if not unmerge_files:
10623                                 print "\nNo packages to unmerge have been provided.\n"
10624                                 return 0
10625                         for x in unmerge_files:
10626                                 arg_parts = x.split('/')
10627                                 if x[0] not in [".","/"] and \
10628                                         arg_parts[-1][-7:] != ".ebuild":
10629                                         #possible cat/pkg or dep; treat as such
10630                                         candidate_catpkgs.append(x)
10631                                 elif unmerge_action in ["prune","clean"]:
10632                                         print "\n!!! Prune and clean do not accept individual" + \
10633                                                 " ebuilds as arguments;\n    skipping.\n"
10634                                         continue
10635                                 else:
10636                                         # it appears that the user is specifying an installed
10637                                         # ebuild and we're in "unmerge" mode, so it's ok.
10638                                         if not os.path.exists(x):
10639                                                 print "\n!!! The path '"+x+"' doesn't exist.\n"
10640                                                 return 0
10641         
10642                                         absx   = os.path.abspath(x)
10643                                         sp_absx = absx.split("/")
10644                                         if sp_absx[-1][-7:] == ".ebuild":
10645                                                 del sp_absx[-1]
10646                                                 absx = "/".join(sp_absx)
10647         
10648                                         sp_absx_len = len(sp_absx)
10649         
10650                                         vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
10651                                         vdb_len  = len(vdb_path)
10652         
10653                                         sp_vdb     = vdb_path.split("/")
10654                                         sp_vdb_len = len(sp_vdb)
10655         
10656                                         if not os.path.exists(absx+"/CONTENTS"):
10657                                                 print "!!! Not a valid db dir: "+str(absx)
10658                                                 return 0
10659         
10660                                         if sp_absx_len <= sp_vdb_len:
10661                                                 # The Path is shorter... so it can't be inside the vdb.
10662                                                 print sp_absx
10663                                                 print absx
10664                                                 print "\n!!!",x,"cannot be inside "+ \
10665                                                         vdb_path+"; aborting.\n"
10666                                                 return 0
10667         
10668                                         for idx in range(0,sp_vdb_len):
10669                                                 if idx >= sp_absx_len or sp_vdb[idx] != sp_absx[idx]:
10670                                                         print sp_absx
10671                                                         print absx
10672                                                         print "\n!!!", x, "is not inside "+\
10673                                                                 vdb_path+"; aborting.\n"
10674                                                         return 0
10675         
10676                                         print "="+"/".join(sp_absx[sp_vdb_len:])
10677                                         candidate_catpkgs.append(
10678                                                 "="+"/".join(sp_absx[sp_vdb_len:]))
10679         
10680                 newline=""
10681                 if (not "--quiet" in myopts):
10682                         newline="\n"
10683                 if settings["ROOT"] != "/":
10684                         writemsg_level(darkgreen(newline+ \
10685                                 ">>> Using system located in ROOT tree %s\n" % \
10686                                 settings["ROOT"]))
10687
10688                 if (("--pretend" in myopts) or ("--ask" in myopts)) and \
10689                         not ("--quiet" in myopts):
10690                         writemsg_level(darkgreen(newline+\
10691                                 ">>> These are the packages that would be unmerged:\n"))
10692
10693                 # Preservation of order is required for --depclean and --prune so
10694                 # that dependencies are respected. Use all_selected to eliminate
10695                 # duplicate packages since the same package may be selected by
10696                 # multiple atoms.
10697                 pkgmap = []
10698                 all_selected = set()
10699                 for x in candidate_catpkgs:
10700                         # cycle through all our candidate deps and determine
10701                         # what will and will not get unmerged
10702                         try:
10703                                 mymatch = vartree.dbapi.match(x)
10704                         except portage.exception.AmbiguousPackageName, errpkgs:
10705                                 print "\n\n!!! The short ebuild name \"" + \
10706                                         x + "\" is ambiguous.  Please specify"
10707                                 print "!!! one of the following fully-qualified " + \
10708                                         "ebuild names instead:\n"
10709                                 for i in errpkgs[0]:
10710                                         print "    " + green(i)
10711                                 print
10712                                 sys.exit(1)
10713         
10714                         if not mymatch and x[0] not in "<>=~":
10715                                 mymatch = localtree.dep_match(x)
10716                         if not mymatch:
10717                                 portage.writemsg("\n--- Couldn't find '%s' to %s.\n" % \
10718                                         (x, unmerge_action), noiselevel=-1)
10719                                 continue
10720
10721                         pkgmap.append(
10722                                 {"protected": set(), "selected": set(), "omitted": set()})
10723                         mykey = len(pkgmap) - 1
10724                         if unmerge_action=="unmerge":
10725                                         for y in mymatch:
10726                                                 if y not in all_selected:
10727                                                         pkgmap[mykey]["selected"].add(y)
10728                                                         all_selected.add(y)
10729                         elif unmerge_action == "prune":
10730                                 if len(mymatch) == 1:
10731                                         continue
10732                                 best_version = mymatch[0]
10733                                 best_slot = vartree.getslot(best_version)
10734                                 best_counter = vartree.dbapi.cpv_counter(best_version)
10735                                 for mypkg in mymatch[1:]:
10736                                         myslot = vartree.getslot(mypkg)
10737                                         mycounter = vartree.dbapi.cpv_counter(mypkg)
10738                                         if (myslot == best_slot and mycounter > best_counter) or \
10739                                                 mypkg == portage.best([mypkg, best_version]):
10740                                                 if myslot == best_slot:
10741                                                         if mycounter < best_counter:
10742                                                                 # On slot collision, keep the one with the
10743                                                                 # highest counter since it is the most
10744                                                                 # recently installed.
10745                                                                 continue
10746                                                 best_version = mypkg
10747                                                 best_slot = myslot
10748                                                 best_counter = mycounter
10749                                 pkgmap[mykey]["protected"].add(best_version)
10750                                 pkgmap[mykey]["selected"].update(mypkg for mypkg in mymatch \
10751                                         if mypkg != best_version and mypkg not in all_selected)
10752                                 all_selected.update(pkgmap[mykey]["selected"])
10753                         else:
10754                                 # unmerge_action == "clean"
10755                                 slotmap={}
10756                                 for mypkg in mymatch:
10757                                         if unmerge_action == "clean":
10758                                                 myslot = localtree.getslot(mypkg)
10759                                         else:
10760                                                 # since we're pruning, we don't care about slots
10761                                                 # and put all the pkgs in together
10762                                                 myslot = 0
10763                                         if myslot not in slotmap:
10764                                                 slotmap[myslot] = {}
10765                                         slotmap[myslot][localtree.dbapi.cpv_counter(mypkg)] = mypkg
10766                                 
10767                                 for myslot in slotmap:
10768                                         counterkeys = slotmap[myslot].keys()
10769                                         if not counterkeys:
10770                                                 continue
10771                                         counterkeys.sort()
10772                                         pkgmap[mykey]["protected"].add(
10773                                                 slotmap[myslot][counterkeys[-1]])
10774                                         del counterkeys[-1]
10775                                         #be pretty and get them in order of merge:
10776                                         for ckey in counterkeys:
10777                                                 mypkg = slotmap[myslot][ckey]
10778                                                 if mypkg not in all_selected:
10779                                                         pkgmap[mykey]["selected"].add(mypkg)
10780                                                         all_selected.add(mypkg)
10781                                         # ok, now the last-merged package
10782                                         # is protected, and the rest are selected
10783                 numselected = len(all_selected)
10784                 if global_unmerge and not numselected:
10785                         portage.writemsg_stdout("\n>>> No outdated packages were found on your system.\n")
10786                         return 0
10787         
10788                 if not numselected:
10789                         portage.writemsg_stdout(
10790                                 "\n>>> No packages selected for removal by " + \
10791                                 unmerge_action + "\n")
10792                         return 0
10793         finally:
10794                 if vdb_lock:
10795                         vartree.dbapi.flush_cache()
10796                         portage.locks.unlockdir(vdb_lock)
10797
10798         for cp in xrange(len(pkgmap)):
10799                 for cpv in pkgmap[cp]["selected"].copy():
10800                         try:
10801                                 pkg = _pkg(cpv)
10802                         except KeyError:
10803                                 # It could have been uninstalled
10804                                 # by a concurrent process.
10805                                 continue
10806
10807                         if unmerge_action != "clean" and \
10808                                 root_config.root == "/" and \
10809                                 portage.match_from_list(
10810                                 portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
10811                                 msg = ("Not unmerging package %s since there is no valid " + \
10812                                 "reason for portage to unmerge itself.") % (pkg.cpv,)
10813                                 for line in textwrap.wrap(msg, 75):
10814                                         out.eerror(line)
10815                                 # adjust pkgmap so the display output is correct
10816                                 pkgmap[cp]["selected"].remove(cpv)
10817                                 all_selected.remove(cpv)
10818                                 pkgmap[cp]["protected"].add(cpv)
10819                                 continue
10820
10821         numselected = len(all_selected)
10822         if not numselected:
10823                 writemsg_level(
10824                         "\n>>> No packages selected for removal by " + \
10825                         unmerge_action + "\n")
10826                 return 0
10827
10828         # Unmerge order only matters in some cases
10829         if not ordered:
10830                 unordered = {}
10831                 for d in pkgmap:
10832                         selected = d["selected"]
10833                         if not selected:
10834                                 continue
10835                         cp = portage.cpv_getkey(iter(selected).next())
10836                         cp_dict = unordered.get(cp)
10837                         if cp_dict is None:
10838                                 cp_dict = {}
10839                                 unordered[cp] = cp_dict
10840                                 for k in d:
10841                                         cp_dict[k] = set()
10842                         for k, v in d.iteritems():
10843                                 cp_dict[k].update(v)
10844                 pkgmap = [unordered[cp] for cp in sorted(unordered)]
10845
10846         for x in xrange(len(pkgmap)):
10847                 selected = pkgmap[x]["selected"]
10848                 if not selected:
10849                         continue
10850                 for mytype, mylist in pkgmap[x].iteritems():
10851                         if mytype == "selected":
10852                                 continue
10853                         mylist.difference_update(all_selected)
10854                 cp = portage.cpv_getkey(iter(selected).next())
10855                 for y in localtree.dep_match(cp):
10856                         if y not in pkgmap[x]["omitted"] and \
10857                                 y not in pkgmap[x]["selected"] and \
10858                                 y not in pkgmap[x]["protected"] and \
10859                                 y not in all_selected:
10860                                 pkgmap[x]["omitted"].add(y)
10861                 if global_unmerge and not pkgmap[x]["selected"]:
10862                         #avoid cluttering the preview printout with stuff that isn't getting unmerged
10863                         continue
10864                 if not (pkgmap[x]["protected"] or pkgmap[x]["omitted"]) and cp in syslist:
10865                         writemsg_level(colorize("BAD","\a\n\n!!! " + \
10866                                 "'%s' is part of your system profile.\n" % cp),
10867                                 level=logging.WARNING, noiselevel=-1)
10868                         writemsg_level(colorize("WARN","\a!!! Unmerging it may " + \
10869                                 "be damaging to your system.\n\n"),
10870                                 level=logging.WARNING, noiselevel=-1)
10871                         if clean_delay and "--pretend" not in myopts and "--ask" not in myopts:
10872                                 countdown(int(settings["EMERGE_WARNING_DELAY"]),
10873                                         colorize("UNMERGE_WARN", "Press Ctrl-C to Stop"))
10874                 if not quiet:
10875                         writemsg_level("\n %s\n" % (bold(cp),), noiselevel=-1)
10876                 else:
10877                         writemsg_level(bold(cp) + ": ", noiselevel=-1)
10878                 for mytype in ["selected","protected","omitted"]:
10879                         if not quiet:
10880                                 writemsg_level((mytype + ": ").rjust(14), noiselevel=-1)
10881                         if pkgmap[x][mytype]:
10882                                 sorted_pkgs = [portage.catpkgsplit(mypkg)[1:] for mypkg in pkgmap[x][mytype]]
10883                                 sorted_pkgs.sort(portage.pkgcmp)
10884                                 for pn, ver, rev in sorted_pkgs:
10885                                         if rev == "r0":
10886                                                 myversion = ver
10887                                         else:
10888                                                 myversion = ver + "-" + rev
10889                                         if mytype == "selected":
10890                                                 writemsg_level(
10891                                                         colorize("UNMERGE_WARN", myversion + " "),
10892                                                         noiselevel=-1)
10893                                         else:
10894                                                 writemsg_level(
10895                                                         colorize("GOOD", myversion + " "), noiselevel=-1)
10896                         else:
10897                                 writemsg_level("none ", noiselevel=-1)
10898                         if not quiet:
10899                                 writemsg_level("\n", noiselevel=-1)
10900                 if quiet:
10901                         writemsg_level("\n", noiselevel=-1)
10902
10903         writemsg_level("\n>>> " + colorize("UNMERGE_WARN", "'Selected'") + \
10904                 " packages are slated for removal.\n")
10905         writemsg_level(">>> " + colorize("GOOD", "'Protected'") + \
10906                         " and " + colorize("GOOD", "'omitted'") + \
10907                         " packages will not be removed.\n\n")
10908
10909         if "--pretend" in myopts:
10910                 #we're done... return
10911                 return 0
10912         if "--ask" in myopts:
10913                 if userquery("Would you like to unmerge these packages?")=="No":
10914                         # enter pretend mode for correct formatting of results
10915                         myopts["--pretend"] = True
10916                         print
10917                         print "Quitting."
10918                         print
10919                         return 0
10920         #the real unmerging begins, after a short delay....
10921         if clean_delay and not autoclean:
10922                 countdown(int(settings["CLEAN_DELAY"]), ">>> Unmerging")
10923
10924         for x in xrange(len(pkgmap)):
10925                 for y in pkgmap[x]["selected"]:
10926                         writemsg_level(">>> Unmerging "+y+"...\n", noiselevel=-1)
10927                         emergelog(xterm_titles, "=== Unmerging... ("+y+")")
10928                         mysplit = y.split("/")
10929                         #unmerge...
10930                         retval = portage.unmerge(mysplit[0], mysplit[1], settings["ROOT"],
10931                                 mysettings, unmerge_action not in ["clean","prune"],
10932                                 vartree=vartree, ldpath_mtimes=ldpath_mtimes,
10933                                 scheduler=scheduler)
10934
10935                         if retval != os.EX_OK:
10936                                 emergelog(xterm_titles, " !!! unmerge FAILURE: "+y)
10937                                 if raise_on_error:
10938                                         raise UninstallFailure(retval)
10939                                 sys.exit(retval)
10940                         else:
10941                                 if clean_world and hasattr(sets["world"], "cleanPackage"):
10942                                         sets["world"].cleanPackage(vartree.dbapi, y)
10943                                 emergelog(xterm_titles, " >>> unmerge success: "+y)
10944         if clean_world and hasattr(sets["world"], "remove"):
10945                 for s in root_config.setconfig.active:
10946                         sets["world"].remove(SETPREFIX+s)
10947         return 1
10948
10949 def chk_updated_info_files(root, infodirs, prev_mtimes, retval):
10950
10951         if os.path.exists("/usr/bin/install-info"):
10952                 out = portage.output.EOutput()
10953                 regen_infodirs=[]
10954                 for z in infodirs:
10955                         if z=='':
10956                                 continue
10957                         inforoot=normpath(root+z)
10958                         if os.path.isdir(inforoot):
10959                                 infomtime = long(os.stat(inforoot).st_mtime)
10960                                 if inforoot not in prev_mtimes or \
10961                                         prev_mtimes[inforoot] != infomtime:
10962                                                 regen_infodirs.append(inforoot)
10963
10964                 if not regen_infodirs:
10965                         portage.writemsg_stdout("\n")
10966                         out.einfo("GNU info directory index is up-to-date.")
10967                 else:
10968                         portage.writemsg_stdout("\n")
10969                         out.einfo("Regenerating GNU info directory index...")
10970
10971                         dir_extensions = ("", ".gz", ".bz2")
10972                         icount=0
10973                         badcount=0
10974                         errmsg = ""
10975                         for inforoot in regen_infodirs:
10976                                 if inforoot=='':
10977                                         continue
10978
10979                                 if not os.path.isdir(inforoot) or \
10980                                         not os.access(inforoot, os.W_OK):
10981                                         continue
10982
10983                                 file_list = os.listdir(inforoot)
10984                                 file_list.sort()
10985                                 dir_file = os.path.join(inforoot, "dir")
10986                                 moved_old_dir = False
10987                                 processed_count = 0
10988                                 for x in file_list:
10989                                         if x.startswith(".") or \
10990                                                 os.path.isdir(os.path.join(inforoot, x)):
10991                                                 continue
10992                                         if x.startswith("dir"):
10993                                                 skip = False
10994                                                 for ext in dir_extensions:
10995                                                         if x == "dir" + ext or \
10996                                                                 x == "dir" + ext + ".old":
10997                                                                 skip = True
10998                                                                 break
10999                                                 if skip:
11000                                                         continue
11001                                         if processed_count == 0:
11002                                                 for ext in dir_extensions:
11003                                                         try:
11004                                                                 os.rename(dir_file + ext, dir_file + ext + ".old")
11005                                                                 moved_old_dir = True
11006                                                         except EnvironmentError, e:
11007                                                                 if e.errno != errno.ENOENT:
11008                                                                         raise
11009                                                                 del e
11010                                         processed_count += 1
11011                                         myso=commands.getstatusoutput("LANG=C LANGUAGE=C /usr/bin/install-info --dir-file="+inforoot+"/dir "+inforoot+"/"+x)[1]
11012                                         existsstr="already exists, for file `"
11013                                         if myso!="":
11014                                                 if re.search(existsstr,myso):
11015                                                         # Already exists... Don't increment the count for this.
11016                                                         pass
11017                                                 elif myso[:44]=="install-info: warning: no info dir entry in ":
11018                                                         # This info file doesn't contain a DIR-header: install-info produces this
11019                                                         # (harmless) warning (the --quiet switch doesn't seem to work).
11020                                                         # Don't increment the count for this.
11021                                                         pass
11022                                                 else:
11023                                                         badcount=badcount+1
11024                                                         errmsg += myso + "\n"
11025                                         icount=icount+1
11026
11027                                 if moved_old_dir and not os.path.exists(dir_file):
11028                                         # We didn't generate a new dir file, so put the old file
11029                                         # back where it was originally found.
11030                                         for ext in dir_extensions:
11031                                                 try:
11032                                                         os.rename(dir_file + ext + ".old", dir_file + ext)
11033                                                 except EnvironmentError, e:
11034                                                         if e.errno != errno.ENOENT:
11035                                                                 raise
11036                                                         del e
11037
11038                                 # Clean dir.old cruft so that they don't prevent
11039                                 # unmerge of otherwise empty directories.
11040                                 for ext in dir_extensions:
11041                                         try:
11042                                                 os.unlink(dir_file + ext + ".old")
11043                                         except EnvironmentError, e:
11044                                                 if e.errno != errno.ENOENT:
11045                                                         raise
11046                                                 del e
11047
11048                                 #update mtime so we can potentially avoid regenerating.
11049                                 prev_mtimes[inforoot] = long(os.stat(inforoot).st_mtime)
11050
11051                         if badcount:
11052                                 out.eerror("Processed %d info files; %d errors." % \
11053                                         (icount, badcount))
11054                                 writemsg_level(errmsg, level=logging.ERROR, noiselevel=-1)
11055                         else:
11056                                 if icount > 0:
11057                                         out.einfo("Processed %d info files." % (icount,))
11058
11059
11060 def display_news_notification(root_config, myopts):
11061         target_root = root_config.root
11062         trees = root_config.trees
11063         settings = trees["vartree"].settings
11064         portdb = trees["porttree"].dbapi
11065         vardb = trees["vartree"].dbapi
11066         NEWS_PATH = os.path.join("metadata", "news")
11067         UNREAD_PATH = os.path.join(target_root, NEWS_LIB_PATH, "news")
11068         newsReaderDisplay = False
11069         update = "--pretend" not in myopts
11070
11071         for repo in portdb.getRepositories():
11072                 unreadItems = checkUpdatedNewsItems(
11073                         portdb, vardb, NEWS_PATH, UNREAD_PATH, repo, update=update)
11074                 if unreadItems:
11075                         if not newsReaderDisplay:
11076                                 newsReaderDisplay = True
11077                                 print
11078                         print colorize("WARN", " * IMPORTANT:"),
11079                         print "%s news items need reading for repository '%s'." % (unreadItems, repo)
11080                         
11081         
11082         if newsReaderDisplay:
11083                 print colorize("WARN", " *"),
11084                 print "Use " + colorize("GOOD", "eselect news") + " to read news items."
11085                 print
11086
11087 def _flush_elog_mod_echo():
11088         """
11089         Dump the mod_echo output now so that our other
11090         notifications are shown last.
11091         @rtype: bool
11092         @returns: True if messages were shown, False otherwise.
11093         """
11094         messages_shown = False
11095         try:
11096                 from portage.elog import mod_echo
11097         except ImportError:
11098                 pass # happens during downgrade to a version without the module
11099         else:
11100                 messages_shown = bool(mod_echo._items)
11101                 mod_echo.finalize()
11102         return messages_shown
11103
11104 def post_emerge(root_config, myopts, mtimedb, retval):
11105         """
11106         Misc. things to run at the end of a merge session.
11107         
11108         Update Info Files
11109         Update Config Files
11110         Update News Items
11111         Commit mtimeDB
11112         Display preserved libs warnings
11113         Exit Emerge
11114
11115         @param trees: A dictionary mapping each ROOT to it's package databases
11116         @type trees: dict
11117         @param mtimedb: The mtimeDB to store data needed across merge invocations
11118         @type mtimedb: MtimeDB class instance
11119         @param retval: Emerge's return value
11120         @type retval: Int
11121         @rype: None
11122         @returns:
11123         1.  Calls sys.exit(retval)
11124         """
11125
11126         target_root = root_config.root
11127         trees = { target_root : root_config.trees }
11128         vardbapi = trees[target_root]["vartree"].dbapi
11129         settings = vardbapi.settings
11130         info_mtimes = mtimedb["info"]
11131
11132         # Load the most current variables from ${ROOT}/etc/profile.env
11133         settings.unlock()
11134         settings.reload()
11135         settings.regenerate()
11136         settings.lock()
11137
11138         config_protect = settings.get("CONFIG_PROTECT","").split()
11139         infodirs = settings.get("INFOPATH","").split(":") + \
11140                 settings.get("INFODIR","").split(":")
11141
11142         os.chdir("/")
11143
11144         if retval == os.EX_OK:
11145                 exit_msg = " *** exiting successfully."
11146         else:
11147                 exit_msg = " *** exiting unsuccessfully with status '%s'." % retval
11148         emergelog("notitles" not in settings.features, exit_msg)
11149
11150         _flush_elog_mod_echo()
11151
11152         counter_hash = settings.get("PORTAGE_COUNTER_HASH")
11153         if counter_hash is not None and \
11154                 counter_hash == vardbapi._counter_hash():
11155                 # If vdb state has not changed then there's nothing else to do.
11156                 sys.exit(retval)
11157
11158         vdb_path = os.path.join(target_root, portage.VDB_PATH)
11159         portage.util.ensure_dirs(vdb_path)
11160         vdb_lock = None
11161         if os.access(vdb_path, os.W_OK) and not "--pretend" in myopts:
11162                 vdb_lock = portage.locks.lockdir(vdb_path)
11163
11164         if vdb_lock:
11165                 try:
11166                         if "noinfo" not in settings.features:
11167                                 chk_updated_info_files(target_root,
11168                                         infodirs, info_mtimes, retval)
11169                         mtimedb.commit()
11170                 finally:
11171                         if vdb_lock:
11172                                 portage.locks.unlockdir(vdb_lock)
11173
11174         chk_updated_cfg_files(target_root, config_protect)
11175         
11176         display_news_notification(root_config, myopts)
11177
11178         sys.exit(retval)
11179
11180
11181 def chk_updated_cfg_files(target_root, config_protect):
11182         if config_protect:
11183                 #number of directories with some protect files in them
11184                 procount=0
11185                 for x in config_protect:
11186                         x = os.path.join(target_root, x.lstrip(os.path.sep))
11187                         if not os.access(x, os.W_OK):
11188                                 # Avoid Permission denied errors generated
11189                                 # later by `find`.
11190                                 continue
11191                         try:
11192                                 mymode = os.lstat(x).st_mode
11193                         except OSError:
11194                                 continue
11195                         if stat.S_ISLNK(mymode):
11196                                 # We want to treat it like a directory if it
11197                                 # is a symlink to an existing directory.
11198                                 try:
11199                                         real_mode = os.stat(x).st_mode
11200                                         if stat.S_ISDIR(real_mode):
11201                                                 mymode = real_mode
11202                                 except OSError:
11203                                         pass
11204                         if stat.S_ISDIR(mymode):
11205                                 mycommand = "find '%s' -name '.*' -type d -prune -o -name '._cfg????_*'" % x
11206                         else:
11207                                 mycommand = "find '%s' -maxdepth 1 -name '._cfg????_%s'" % \
11208                                         os.path.split(x.rstrip(os.path.sep))
11209                         mycommand += " ! -name '.*~' ! -iname '.*.bak' -print0"
11210                         a = commands.getstatusoutput(mycommand)
11211                         if a[0] != 0:
11212                                 sys.stderr.write(" %s error scanning '%s': " % (bad("*"), x))
11213                                 sys.stderr.flush()
11214                                 # Show the error message alone, sending stdout to /dev/null.
11215                                 os.system(mycommand + " 1>/dev/null")
11216                         else:
11217                                 files = a[1].split('\0')
11218                                 # split always produces an empty string as the last element
11219                                 if files and not files[-1]:
11220                                         del files[-1]
11221                                 if files:
11222                                         procount += 1
11223                                         print "\n"+colorize("WARN", " * IMPORTANT:"),
11224                                         if stat.S_ISDIR(mymode):
11225                                                  print "%d config files in '%s' need updating." % \
11226                                                         (len(files), x)
11227                                         else:
11228                                                  print "config file '%s' needs updating." % x
11229
11230                 if procount:
11231                         print " "+yellow("*")+" See the "+colorize("INFORM","CONFIGURATION FILES")+ \
11232                                 " section of the " + bold("emerge")
11233                         print " "+yellow("*")+" man page to learn how to update config files."
11234
11235 def checkUpdatedNewsItems(portdb, vardb, NEWS_PATH, UNREAD_PATH, repo_id,
11236         update=False):
11237         """
11238         Examines news items in repodir + '/' + NEWS_PATH and attempts to find unread items
11239         Returns the number of unread (yet relevent) items.
11240         
11241         @param portdb: a portage tree database
11242         @type portdb: pordbapi
11243         @param vardb: an installed package database
11244         @type vardb: vardbapi
11245         @param NEWS_PATH:
11246         @type NEWS_PATH:
11247         @param UNREAD_PATH:
11248         @type UNREAD_PATH:
11249         @param repo_id:
11250         @type repo_id:
11251         @rtype: Integer
11252         @returns:
11253         1.  The number of unread but relevant news items.
11254         
11255         """
11256         from portage.news import NewsManager
11257         manager = NewsManager(portdb, vardb, NEWS_PATH, UNREAD_PATH)
11258         return manager.getUnreadItems( repo_id, update=update )
11259
11260 def insert_category_into_atom(atom, category):
11261         alphanum = re.search(r'\w', atom)
11262         if alphanum:
11263                 ret = atom[:alphanum.start()] + "%s/" % category + \
11264                         atom[alphanum.start():]
11265         else:
11266                 ret = None
11267         return ret
11268
11269 def is_valid_package_atom(x):
11270         if "/" not in x:
11271                 alphanum = re.search(r'\w', x)
11272                 if alphanum:
11273                         x = x[:alphanum.start()] + "cat/" + x[alphanum.start():]
11274         return portage.isvalidatom(x)
11275
11276 def show_blocker_docs_link():
11277         print
11278         print "For more information about " + bad("Blocked Packages") + ", please refer to the following"
11279         print "section of the Gentoo Linux x86 Handbook (architecture is irrelevant):"
11280         print
11281         print "http://www.gentoo.org/doc/en/handbook/handbook-x86.xml?full=1#blocked"
11282         print
11283
11284 def show_mask_docs():
11285         print "For more information, see the MASKED PACKAGES section in the emerge"
11286         print "man page or refer to the Gentoo Handbook."
11287
11288 def action_sync(settings, trees, mtimedb, myopts, myaction):
11289         xterm_titles = "notitles" not in settings.features
11290         emergelog(xterm_titles, " === sync")
11291         myportdir = settings.get("PORTDIR", None)
11292         out = portage.output.EOutput()
11293         if not myportdir:
11294                 sys.stderr.write("!!! PORTDIR is undefined.  Is /etc/make.globals missing?\n")
11295                 sys.exit(1)
11296         if myportdir[-1]=="/":
11297                 myportdir=myportdir[:-1]
11298         if not os.path.exists(myportdir):
11299                 print ">>>",myportdir,"not found, creating it."
11300                 os.makedirs(myportdir,0755)
11301         syncuri = settings.get("SYNC", "").strip()
11302         if not syncuri:
11303                 writemsg_level("!!! SYNC is undefined. Is /etc/make.globals missing?\n",
11304                         noiselevel=-1, level=logging.ERROR)
11305                 return 1
11306
11307         os.umask(0022)
11308         updatecache_flg = False
11309         if myaction == "metadata":
11310                 print "skipping sync"
11311                 updatecache_flg = True
11312         elif syncuri[:8]=="rsync://":
11313                 if not os.path.exists("/usr/bin/rsync"):
11314                         print "!!! /usr/bin/rsync does not exist, so rsync support is disabled."
11315                         print "!!! Type \"emerge net-misc/rsync\" to enable rsync support."
11316                         sys.exit(1)
11317                 mytimeout=180
11318
11319                 rsync_opts = []
11320                 import shlex, StringIO
11321                 if settings["PORTAGE_RSYNC_OPTS"] == "":
11322                         portage.writemsg("PORTAGE_RSYNC_OPTS empty or unset, using hardcoded defaults\n")
11323                         rsync_opts.extend([
11324                                 "--recursive",    # Recurse directories
11325                                 "--links",        # Consider symlinks
11326                                 "--safe-links",   # Ignore links outside of tree
11327                                 "--perms",        # Preserve permissions
11328                                 "--times",        # Preserive mod times
11329                                 "--compress",     # Compress the data transmitted
11330                                 "--force",        # Force deletion on non-empty dirs
11331                                 "--whole-file",   # Don't do block transfers, only entire files
11332                                 "--delete",       # Delete files that aren't in the master tree
11333                                 "--stats",        # Show final statistics about what was transfered
11334                                 "--timeout="+str(mytimeout), # IO timeout if not done in X seconds
11335                                 "--exclude=/distfiles",   # Exclude distfiles from consideration
11336                                 "--exclude=/local",       # Exclude local     from consideration
11337                                 "--exclude=/packages",    # Exclude packages  from consideration
11338                         ])
11339
11340                 else:
11341                         # The below validation is not needed when using the above hardcoded
11342                         # defaults.
11343
11344                         portage.writemsg("Using PORTAGE_RSYNC_OPTS instead of hardcoded defaults\n", 1)
11345                         lexer = shlex.shlex(StringIO.StringIO(
11346                                 settings.get("PORTAGE_RSYNC_OPTS","")), posix=True)
11347                         lexer.whitespace_split = True
11348                         rsync_opts.extend(lexer)
11349                         del lexer
11350
11351                         for opt in ("--recursive", "--times"):
11352                                 if opt not in rsync_opts:
11353                                         portage.writemsg(yellow("WARNING:") + " adding required option " + \
11354                                         "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
11355                                         rsync_opts.append(opt)
11356         
11357                         for exclude in ("distfiles", "local", "packages"):
11358                                 opt = "--exclude=/%s" % exclude
11359                                 if opt not in rsync_opts:
11360                                         portage.writemsg(yellow("WARNING:") + \
11361                                         " adding required option %s not included in "  % opt + \
11362                                         "PORTAGE_RSYNC_OPTS (can be overridden with --exclude='!')\n")
11363                                         rsync_opts.append(opt)
11364         
11365                         if syncuri.rstrip("/").endswith(".gentoo.org/gentoo-portage"):
11366                                 def rsync_opt_startswith(opt_prefix):
11367                                         for x in rsync_opts:
11368                                                 if x.startswith(opt_prefix):
11369                                                         return True
11370                                         return False
11371
11372                                 if not rsync_opt_startswith("--timeout="):
11373                                         rsync_opts.append("--timeout=%d" % mytimeout)
11374
11375                                 for opt in ("--compress", "--whole-file"):
11376                                         if opt not in rsync_opts:
11377                                                 portage.writemsg(yellow("WARNING:") + " adding required option " + \
11378                                                 "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
11379                                                 rsync_opts.append(opt)
11380
11381                 if "--quiet" in myopts:
11382                         rsync_opts.append("--quiet")    # Shut up a lot
11383                 else:
11384                         rsync_opts.append("--verbose")  # Print filelist
11385
11386                 if "--verbose" in myopts:
11387                         rsync_opts.append("--progress")  # Progress meter for each file
11388
11389                 if "--debug" in myopts:
11390                         rsync_opts.append("--checksum") # Force checksum on all files
11391
11392                 # Real local timestamp file.
11393                 servertimestampfile = os.path.join(
11394                         myportdir, "metadata", "timestamp.chk")
11395
11396                 content = portage.util.grabfile(servertimestampfile)
11397                 mytimestamp = 0
11398                 if content:
11399                         try:
11400                                 mytimestamp = time.mktime(time.strptime(content[0],
11401                                         "%a, %d %b %Y %H:%M:%S +0000"))
11402                         except (OverflowError, ValueError):
11403                                 pass
11404                 del content
11405
11406                 try:
11407                         rsync_initial_timeout = \
11408                                 int(settings.get("PORTAGE_RSYNC_INITIAL_TIMEOUT", "15"))
11409                 except ValueError:
11410                         rsync_initial_timeout = 15
11411
11412                 try:
11413                         maxretries=int(settings["PORTAGE_RSYNC_RETRIES"])
11414                 except SystemExit, e:
11415                         raise # Needed else can't exit
11416                 except:
11417                         maxretries=3 #default number of retries
11418
11419                 retries=0
11420                 user_name, hostname, port = re.split(
11421                         "rsync://([^:/]+@)?([^:/]*)(:[0-9]+)?", syncuri, maxsplit=3)[1:4]
11422                 if port is None:
11423                         port=""
11424                 if user_name is None:
11425                         user_name=""
11426                 updatecache_flg=True
11427                 all_rsync_opts = set(rsync_opts)
11428                 lexer = shlex.shlex(StringIO.StringIO(
11429                         settings.get("PORTAGE_RSYNC_EXTRA_OPTS","")), posix=True)
11430                 lexer.whitespace_split = True
11431                 extra_rsync_opts = list(lexer)
11432                 del lexer
11433                 all_rsync_opts.update(extra_rsync_opts)
11434                 family = socket.AF_INET
11435                 if "-4" in all_rsync_opts or "--ipv4" in all_rsync_opts:
11436                         family = socket.AF_INET
11437                 elif socket.has_ipv6 and \
11438                         ("-6" in all_rsync_opts or "--ipv6" in all_rsync_opts):
11439                         family = socket.AF_INET6
11440                 ips=[]
11441                 SERVER_OUT_OF_DATE = -1
11442                 EXCEEDED_MAX_RETRIES = -2
11443                 while (1):
11444                         if ips:
11445                                 del ips[0]
11446                         if ips==[]:
11447                                 try:
11448                                         for addrinfo in socket.getaddrinfo(
11449                                                 hostname, None, family, socket.SOCK_STREAM):
11450                                                 if socket.has_ipv6 and addrinfo[0] == socket.AF_INET6:
11451                                                         # IPv6 addresses need to be enclosed in square brackets
11452                                                         ips.append("[%s]" % addrinfo[4][0])
11453                                                 else:
11454                                                         ips.append(addrinfo[4][0])
11455                                         from random import shuffle
11456                                         shuffle(ips)
11457                                 except SystemExit, e:
11458                                         raise # Needed else can't exit
11459                                 except Exception, e:
11460                                         print "Notice:",str(e)
11461                                         dosyncuri=syncuri
11462
11463                         if ips:
11464                                 try:
11465                                         dosyncuri = syncuri.replace(
11466                                                 "//" + user_name + hostname + port + "/",
11467                                                 "//" + user_name + ips[0] + port + "/", 1)
11468                                 except SystemExit, e:
11469                                         raise # Needed else can't exit
11470                                 except Exception, e:
11471                                         print "Notice:",str(e)
11472                                         dosyncuri=syncuri
11473
11474                         if (retries==0):
11475                                 if "--ask" in myopts:
11476                                         if userquery("Do you want to sync your Portage tree with the mirror at\n" + blue(dosyncuri) + bold("?"))=="No":
11477                                                 print
11478                                                 print "Quitting."
11479                                                 print
11480                                                 sys.exit(0)
11481                                 emergelog(xterm_titles, ">>> Starting rsync with " + dosyncuri)
11482                                 if "--quiet" not in myopts:
11483                                         print ">>> Starting rsync with "+dosyncuri+"..."
11484                         else:
11485                                 emergelog(xterm_titles,
11486                                         ">>> Starting retry %d of %d with %s" % \
11487                                                 (retries,maxretries,dosyncuri))
11488                                 print "\n\n>>> Starting retry %d of %d with %s" % (retries,maxretries,dosyncuri)
11489
11490                         if mytimestamp != 0 and "--quiet" not in myopts:
11491                                 print ">>> Checking server timestamp ..."
11492
11493                         rsynccommand = ["/usr/bin/rsync"] + rsync_opts + extra_rsync_opts
11494
11495                         if "--debug" in myopts:
11496                                 print rsynccommand
11497
11498                         exitcode = os.EX_OK
11499                         servertimestamp = 0
11500                         # Even if there's no timestamp available locally, fetch the
11501                         # timestamp anyway as an initial probe to verify that the server is
11502                         # responsive.  This protects us from hanging indefinitely on a
11503                         # connection attempt to an unresponsive server which rsync's
11504                         # --timeout option does not prevent.
11505                         if True:
11506                                 # Temporary file for remote server timestamp comparison.
11507                                 from tempfile import mkstemp
11508                                 fd, tmpservertimestampfile = mkstemp()
11509                                 os.close(fd)
11510                                 mycommand = rsynccommand[:]
11511                                 mycommand.append(dosyncuri.rstrip("/") + \
11512                                         "/metadata/timestamp.chk")
11513                                 mycommand.append(tmpservertimestampfile)
11514                                 content = None
11515                                 mypids = []
11516                                 try:
11517                                         def timeout_handler(signum, frame):
11518                                                 raise portage.exception.PortageException("timed out")
11519                                         signal.signal(signal.SIGALRM, timeout_handler)
11520                                         # Timeout here in case the server is unresponsive.  The
11521                                         # --timeout rsync option doesn't apply to the initial
11522                                         # connection attempt.
11523                                         if rsync_initial_timeout:
11524                                                 signal.alarm(rsync_initial_timeout)
11525                                         try:
11526                                                 mypids.extend(portage.process.spawn(
11527                                                         mycommand, env=settings.environ(), returnpid=True))
11528                                                 exitcode = os.waitpid(mypids[0], 0)[1]
11529                                                 content = portage.grabfile(tmpservertimestampfile)
11530                                         finally:
11531                                                 if rsync_initial_timeout:
11532                                                         signal.alarm(0)
11533                                                 try:
11534                                                         os.unlink(tmpservertimestampfile)
11535                                                 except OSError:
11536                                                         pass
11537                                 except portage.exception.PortageException, e:
11538                                         # timed out
11539                                         print e
11540                                         del e
11541                                         if mypids and os.waitpid(mypids[0], os.WNOHANG) == (0,0):
11542                                                 os.kill(mypids[0], signal.SIGTERM)
11543                                                 os.waitpid(mypids[0], 0)
11544                                         # This is the same code rsync uses for timeout.
11545                                         exitcode = 30
11546                                 else:
11547                                         if exitcode != os.EX_OK:
11548                                                 if exitcode & 0xff:
11549                                                         exitcode = (exitcode & 0xff) << 8
11550                                                 else:
11551                                                         exitcode = exitcode >> 8
11552                                 if mypids:
11553                                         portage.process.spawned_pids.remove(mypids[0])
11554                                 if content:
11555                                         try:
11556                                                 servertimestamp = time.mktime(time.strptime(
11557                                                         content[0], "%a, %d %b %Y %H:%M:%S +0000"))
11558                                         except (OverflowError, ValueError):
11559                                                 pass
11560                                 del mycommand, mypids, content
11561                         if exitcode == os.EX_OK:
11562                                 if (servertimestamp != 0) and (servertimestamp == mytimestamp):
11563                                         emergelog(xterm_titles,
11564                                                 ">>> Cancelling sync -- Already current.")
11565                                         print
11566                                         print ">>>"
11567                                         print ">>> Timestamps on the server and in the local repository are the same."
11568                                         print ">>> Cancelling all further sync action. You are already up to date."
11569                                         print ">>>"
11570                                         print ">>> In order to force sync, remove '%s'." % servertimestampfile
11571                                         print ">>>"
11572                                         print
11573                                         sys.exit(0)
11574                                 elif (servertimestamp != 0) and (servertimestamp < mytimestamp):
11575                                         emergelog(xterm_titles,
11576                                                 ">>> Server out of date: %s" % dosyncuri)
11577                                         print
11578                                         print ">>>"
11579                                         print ">>> SERVER OUT OF DATE: %s" % dosyncuri
11580                                         print ">>>"
11581                                         print ">>> In order to force sync, remove '%s'." % servertimestampfile
11582                                         print ">>>"
11583                                         print
11584                                         exitcode = SERVER_OUT_OF_DATE
11585                                 elif (servertimestamp == 0) or (servertimestamp > mytimestamp):
11586                                         # actual sync
11587                                         mycommand = rsynccommand + [dosyncuri+"/", myportdir]
11588                                         exitcode = portage.process.spawn(mycommand,
11589                                                 env=settings.environ())
11590                                         if exitcode in [0,1,3,4,11,14,20,21]:
11591                                                 break
11592                         elif exitcode in [1,3,4,11,14,20,21]:
11593                                 break
11594                         else:
11595                                 # Code 2 indicates protocol incompatibility, which is expected
11596                                 # for servers with protocol < 29 that don't support
11597                                 # --prune-empty-directories.  Retry for a server that supports
11598                                 # at least rsync protocol version 29 (>=rsync-2.6.4).
11599                                 pass
11600
11601                         retries=retries+1
11602
11603                         if retries<=maxretries:
11604                                 print ">>> Retrying..."
11605                                 time.sleep(11)
11606                         else:
11607                                 # over retries
11608                                 # exit loop
11609                                 updatecache_flg=False
11610                                 exitcode = EXCEEDED_MAX_RETRIES
11611                                 break
11612
11613                 if (exitcode==0):
11614                         emergelog(xterm_titles, "=== Sync completed with %s" % dosyncuri)
11615                 elif exitcode == SERVER_OUT_OF_DATE:
11616                         sys.exit(1)
11617                 elif exitcode == EXCEEDED_MAX_RETRIES:
11618                         sys.stderr.write(
11619                                 ">>> Exceeded PORTAGE_RSYNC_RETRIES: %s\n" % maxretries)
11620                         sys.exit(1)
11621                 elif (exitcode>0):
11622                         msg = []
11623                         if exitcode==1:
11624                                 msg.append("Rsync has reported that there is a syntax error. Please ensure")
11625                                 msg.append("that your SYNC statement is proper.")
11626                                 msg.append("SYNC=" + settings["SYNC"])
11627                         elif exitcode==11:
11628                                 msg.append("Rsync has reported that there is a File IO error. Normally")
11629                                 msg.append("this means your disk is full, but can be caused by corruption")
11630                                 msg.append("on the filesystem that contains PORTDIR. Please investigate")
11631                                 msg.append("and try again after the problem has been fixed.")
11632                                 msg.append("PORTDIR=" + settings["PORTDIR"])
11633                         elif exitcode==20:
11634                                 msg.append("Rsync was killed before it finished.")
11635                         else:
11636                                 msg.append("Rsync has not successfully finished. It is recommended that you keep")
11637                                 msg.append("trying or that you use the 'emerge-webrsync' option if you are unable")
11638                                 msg.append("to use rsync due to firewall or other restrictions. This should be a")
11639                                 msg.append("temporary problem unless complications exist with your network")
11640                                 msg.append("(and possibly your system's filesystem) configuration.")
11641                         for line in msg:
11642                                 out.eerror(line)
11643                         sys.exit(exitcode)
11644         elif syncuri[:6]=="cvs://":
11645                 if not os.path.exists("/usr/bin/cvs"):
11646                         print "!!! /usr/bin/cvs does not exist, so CVS support is disabled."
11647                         print "!!! Type \"emerge dev-util/cvs\" to enable CVS support."
11648                         sys.exit(1)
11649                 cvsroot=syncuri[6:]
11650                 cvsdir=os.path.dirname(myportdir)
11651                 if not os.path.exists(myportdir+"/CVS"):
11652                         #initial checkout
11653                         print ">>> Starting initial cvs checkout with "+syncuri+"..."
11654                         if os.path.exists(cvsdir+"/gentoo-x86"):
11655                                 print "!!! existing",cvsdir+"/gentoo-x86 directory; exiting."
11656                                 sys.exit(1)
11657                         try:
11658                                 os.rmdir(myportdir)
11659                         except OSError, e:
11660                                 if e.errno != errno.ENOENT:
11661                                         sys.stderr.write(
11662                                                 "!!! existing '%s' directory; exiting.\n" % myportdir)
11663                                         sys.exit(1)
11664                                 del e
11665                         if portage.spawn("cd "+cvsdir+"; cvs -z0 -d "+cvsroot+" co -P gentoo-x86",settings,free=1):
11666                                 print "!!! cvs checkout error; exiting."
11667                                 sys.exit(1)
11668                         os.rename(os.path.join(cvsdir, "gentoo-x86"), myportdir)
11669                 else:
11670                         #cvs update
11671                         print ">>> Starting cvs update with "+syncuri+"..."
11672                         retval = portage.spawn("cd '%s'; cvs -z0 -q update -dP" % \
11673                                 myportdir, settings, free=1)
11674                         if retval != os.EX_OK:
11675                                 sys.exit(retval)
11676                 dosyncuri = syncuri
11677         else:
11678                 writemsg_level("!!! Unrecognized protocol: SYNC='%s'\n" % (syncuri,),
11679                         noiselevel=-1, level=logging.ERROR)
11680                 return 1
11681
11682         if updatecache_flg and  \
11683                 myaction != "metadata" and \
11684                 "metadata-transfer" not in settings.features:
11685                 updatecache_flg = False
11686
11687         # Reload the whole config from scratch.
11688         settings, trees, mtimedb = load_emerge_config(trees=trees)
11689         root_config = trees[settings["ROOT"]]["root_config"]
11690         portdb = trees[settings["ROOT"]]["porttree"].dbapi
11691
11692         if os.path.exists(myportdir+"/metadata/cache") and updatecache_flg:
11693                 action_metadata(settings, portdb, myopts)
11694
11695         if portage._global_updates(trees, mtimedb["updates"]):
11696                 mtimedb.commit()
11697                 # Reload the whole config from scratch.
11698                 settings, trees, mtimedb = load_emerge_config(trees=trees)
11699                 portdb = trees[settings["ROOT"]]["porttree"].dbapi
11700                 root_config = trees[settings["ROOT"]]["root_config"]
11701
11702         mybestpv = portdb.xmatch("bestmatch-visible",
11703                 portage.const.PORTAGE_PACKAGE_ATOM)
11704         mypvs = portage.best(
11705                 trees[settings["ROOT"]]["vartree"].dbapi.match(
11706                 portage.const.PORTAGE_PACKAGE_ATOM))
11707
11708         chk_updated_cfg_files("/", settings.get("CONFIG_PROTECT","").split())
11709
11710         if myaction != "metadata":
11711                 if os.access(portage.USER_CONFIG_PATH + "/bin/post_sync", os.X_OK):
11712                         retval = portage.process.spawn(
11713                                 [os.path.join(portage.USER_CONFIG_PATH, "bin", "post_sync"),
11714                                 dosyncuri], env=settings.environ())
11715                         if retval != os.EX_OK:
11716                                 print red(" * ")+bold("spawn failed of "+ portage.USER_CONFIG_PATH + "/bin/post_sync")
11717
11718         if(mybestpv != mypvs) and not "--quiet" in myopts:
11719                 print
11720                 print red(" * ")+bold("An update to portage is available.")+" It is _highly_ recommended"
11721                 print red(" * ")+"that you update portage now, before any other packages are updated."
11722                 print
11723                 print red(" * ")+"To update portage, run 'emerge portage' now."
11724                 print
11725         
11726         display_news_notification(root_config, myopts)
11727         return os.EX_OK
11728
11729 def action_metadata(settings, portdb, myopts):
11730         portage.writemsg_stdout("\n>>> Updating Portage cache:      ")
11731         old_umask = os.umask(0002)
11732         cachedir = os.path.normpath(settings.depcachedir)
11733         if cachedir in ["/",    "/bin", "/dev",  "/etc",  "/home",
11734                                         "/lib", "/opt", "/proc", "/root", "/sbin",
11735                                         "/sys", "/tmp", "/usr",  "/var"]:
11736                 print >> sys.stderr, "!!! PORTAGE_DEPCACHEDIR IS SET TO A PRIMARY " + \
11737                         "ROOT DIRECTORY ON YOUR SYSTEM."
11738                 print >> sys.stderr, \
11739                         "!!! This is ALMOST CERTAINLY NOT what you want: '%s'" % cachedir
11740                 sys.exit(73)
11741         if not os.path.exists(cachedir):
11742                 os.mkdir(cachedir)
11743
11744         ec = portage.eclass_cache.cache(portdb.porttree_root)
11745         myportdir = os.path.realpath(settings["PORTDIR"])
11746         cm = settings.load_best_module("portdbapi.metadbmodule")(
11747                 myportdir, "metadata/cache", portage.auxdbkeys[:], readonly=True)
11748
11749         from portage.cache import util
11750
11751         class percentage_noise_maker(util.quiet_mirroring):
11752                 def __init__(self, dbapi):
11753                         self.dbapi = dbapi
11754                         self.cp_all = dbapi.cp_all()
11755                         l = len(self.cp_all)
11756                         self.call_update_min = 100000000
11757                         self.min_cp_all = l/100.0
11758                         self.count = 1
11759                         self.pstr = ''
11760
11761                 def __iter__(self):
11762                         for x in self.cp_all:
11763                                 self.count += 1
11764                                 if self.count > self.min_cp_all:
11765                                         self.call_update_min = 0
11766                                         self.count = 0
11767                                 for y in self.dbapi.cp_list(x):
11768                                         yield y
11769                         self.call_update_mine = 0
11770
11771                 def update(self, *arg):
11772                         try:                            self.pstr = int(self.pstr) + 1
11773                         except ValueError:      self.pstr = 1
11774                         sys.stdout.write("%s%i%%" % \
11775                                 ("\b" * (len(str(self.pstr))+1), self.pstr))
11776                         sys.stdout.flush()
11777                         self.call_update_min = 10000000
11778
11779                 def finish(self, *arg):
11780                         sys.stdout.write("\b\b\b\b100%\n")
11781                         sys.stdout.flush()
11782
11783         if "--quiet" in myopts:
11784                 def quicky_cpv_generator(cp_all_list):
11785                         for x in cp_all_list:
11786                                 for y in portdb.cp_list(x):
11787                                         yield y
11788                 source = quicky_cpv_generator(portdb.cp_all())
11789                 noise_maker = portage.cache.util.quiet_mirroring()
11790         else:
11791                 noise_maker = source = percentage_noise_maker(portdb)
11792         portage.cache.util.mirror_cache(source, cm, portdb.auxdb[myportdir],
11793                 eclass_cache=ec, verbose_instance=noise_maker)
11794
11795         sys.stdout.flush()
11796         os.umask(old_umask)
11797
11798 def action_regen(settings, portdb, max_jobs, max_load):
11799         xterm_titles = "notitles" not in settings.features
11800         emergelog(xterm_titles, " === regen")
11801         #regenerate cache entries
11802         portage.writemsg_stdout("Regenerating cache entries...\n")
11803         try:
11804                 os.close(sys.stdin.fileno())
11805         except SystemExit, e:
11806                 raise # Needed else can't exit
11807         except:
11808                 pass
11809         sys.stdout.flush()
11810
11811         regen = MetadataRegen(portdb, max_jobs=max_jobs, max_load=max_load)
11812         regen.run()
11813
11814         portage.writemsg_stdout("done!\n")
11815
11816 def action_config(settings, trees, myopts, myfiles):
11817         if len(myfiles) != 1:
11818                 print red("!!! config can only take a single package atom at this time\n")
11819                 sys.exit(1)
11820         if not is_valid_package_atom(myfiles[0]):
11821                 portage.writemsg("!!! '%s' is not a valid package atom.\n" % myfiles[0],
11822                         noiselevel=-1)
11823                 portage.writemsg("!!! Please check ebuild(5) for full details.\n")
11824                 portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
11825                 sys.exit(1)
11826         print
11827         try:
11828                 pkgs = trees[settings["ROOT"]]["vartree"].dbapi.match(myfiles[0])
11829         except portage.exception.AmbiguousPackageName, e:
11830                 # Multiple matches thrown from cpv_expand
11831                 pkgs = e.args[0]
11832         if len(pkgs) == 0:
11833                 print "No packages found.\n"
11834                 sys.exit(0)
11835         elif len(pkgs) > 1:
11836                 if "--ask" in myopts:
11837                         options = []
11838                         print "Please select a package to configure:"
11839                         idx = 0
11840                         for pkg in pkgs:
11841                                 idx += 1
11842                                 options.append(str(idx))
11843                                 print options[-1]+") "+pkg
11844                         print "X) Cancel"
11845                         options.append("X")
11846                         idx = userquery("Selection?", options)
11847                         if idx == "X":
11848                                 sys.exit(0)
11849                         pkg = pkgs[int(idx)-1]
11850                 else:
11851                         print "The following packages available:"
11852                         for pkg in pkgs:
11853                                 print "* "+pkg
11854                         print "\nPlease use a specific atom or the --ask option."
11855                         sys.exit(1)
11856         else:
11857                 pkg = pkgs[0]
11858
11859         print
11860         if "--ask" in myopts:
11861                 if userquery("Ready to configure "+pkg+"?") == "No":
11862                         sys.exit(0)
11863         else:
11864                 print "Configuring pkg..."
11865         print
11866         ebuildpath = trees[settings["ROOT"]]["vartree"].dbapi.findname(pkg)
11867         mysettings = portage.config(clone=settings)
11868         vardb = trees[mysettings["ROOT"]]["vartree"].dbapi
11869         debug = mysettings.get("PORTAGE_DEBUG") == "1"
11870         retval = portage.doebuild(ebuildpath, "config", mysettings["ROOT"],
11871                 mysettings,
11872                 debug=(settings.get("PORTAGE_DEBUG", "") == 1), cleanup=True,
11873                 mydbapi=trees[settings["ROOT"]]["vartree"].dbapi, tree="vartree")
11874         if retval == os.EX_OK:
11875                 portage.doebuild(ebuildpath, "clean", mysettings["ROOT"],
11876                         mysettings, debug=debug, mydbapi=vardb, tree="vartree")
11877         print
11878
11879 def action_info(settings, trees, myopts, myfiles):
11880         print getportageversion(settings["PORTDIR"], settings["ROOT"],
11881                 settings.profile_path, settings["CHOST"],
11882                 trees[settings["ROOT"]]["vartree"].dbapi)
11883         header_width = 65
11884         header_title = "System Settings"
11885         if myfiles:
11886                 print header_width * "="
11887                 print header_title.rjust(int(header_width/2 + len(header_title)/2))
11888         print header_width * "="
11889         print "System uname: "+platform.platform(aliased=1)
11890
11891         lastSync = portage.grabfile(os.path.join(
11892                 settings["PORTDIR"], "metadata", "timestamp.chk"))
11893         print "Timestamp of tree:",
11894         if lastSync:
11895                 print lastSync[0]
11896         else:
11897                 print "Unknown"
11898
11899         output=commands.getstatusoutput("distcc --version")
11900         if not output[0]:
11901                 print str(output[1].split("\n",1)[0]),
11902                 if "distcc" in settings.features:
11903                         print "[enabled]"
11904                 else:
11905                         print "[disabled]"
11906
11907         output=commands.getstatusoutput("ccache -V")
11908         if not output[0]:
11909                 print str(output[1].split("\n",1)[0]),
11910                 if "ccache" in settings.features:
11911                         print "[enabled]"
11912                 else:
11913                         print "[disabled]"
11914
11915         myvars  = ["sys-devel/autoconf", "sys-devel/automake", "virtual/os-headers",
11916                    "sys-devel/binutils", "sys-devel/libtool",  "dev-lang/python"]
11917         myvars += portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_pkgs")
11918         myvars  = portage.util.unique_array(myvars)
11919         myvars.sort()
11920
11921         for x in myvars:
11922                 if portage.isvalidatom(x):
11923                         pkg_matches = trees["/"]["vartree"].dbapi.match(x)
11924                         pkg_matches = [portage.catpkgsplit(cpv)[1:] for cpv in pkg_matches]
11925                         pkg_matches.sort(portage.pkgcmp)
11926                         pkgs = []
11927                         for pn, ver, rev in pkg_matches:
11928                                 if rev != "r0":
11929                                         pkgs.append(ver + "-" + rev)
11930                                 else:
11931                                         pkgs.append(ver)
11932                         if pkgs:
11933                                 pkgs = ", ".join(pkgs)
11934                                 print "%-20s %s" % (x+":", pkgs)
11935                 else:
11936                         print "%-20s %s" % (x+":", "[NOT VALID]")
11937
11938         libtool_vers = ",".join(trees["/"]["vartree"].dbapi.match("sys-devel/libtool"))
11939
11940         if "--verbose" in myopts:
11941                 myvars=settings.keys()
11942         else:
11943                 myvars = ['GENTOO_MIRRORS', 'CONFIG_PROTECT', 'CONFIG_PROTECT_MASK',
11944                           'PORTDIR', 'DISTDIR', 'PKGDIR', 'PORTAGE_TMPDIR',
11945                           'PORTDIR_OVERLAY', 'USE', 'CHOST', 'CFLAGS', 'CXXFLAGS',
11946                           'ACCEPT_KEYWORDS', 'SYNC', 'FEATURES', 'EMERGE_DEFAULT_OPTS']
11947
11948                 myvars.extend(portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_vars"))
11949
11950         myvars = portage.util.unique_array(myvars)
11951         unset_vars = []
11952         myvars.sort()
11953         for x in myvars:
11954                 if x in settings:
11955                         if x != "USE":
11956                                 print '%s="%s"' % (x, settings[x])
11957                         else:
11958                                 use = set(settings["USE"].split())
11959                                 use_expand = settings["USE_EXPAND"].split()
11960                                 use_expand.sort()
11961                                 for varname in use_expand:
11962                                         flag_prefix = varname.lower() + "_"
11963                                         for f in list(use):
11964                                                 if f.startswith(flag_prefix):
11965                                                         use.remove(f)
11966                                 use = list(use)
11967                                 use.sort()
11968                                 print 'USE="%s"' % " ".join(use),
11969                                 for varname in use_expand:
11970                                         myval = settings.get(varname)
11971                                         if myval:
11972                                                 print '%s="%s"' % (varname, myval),
11973                                 print
11974                 else:
11975                         unset_vars.append(x)
11976         if unset_vars:
11977                 print "Unset:  "+", ".join(unset_vars)
11978         print
11979
11980         if "--debug" in myopts:
11981                 for x in dir(portage):
11982                         module = getattr(portage, x)
11983                         if "cvs_id_string" in dir(module):
11984                                 print "%s: %s" % (str(x), str(module.cvs_id_string))
11985
11986         # See if we can find any packages installed matching the strings
11987         # passed on the command line
11988         mypkgs = []
11989         vardb = trees[settings["ROOT"]]["vartree"].dbapi
11990         portdb = trees[settings["ROOT"]]["porttree"].dbapi
11991         for x in myfiles:
11992                 mypkgs.extend(vardb.match(x))
11993
11994         # If some packages were found...
11995         if mypkgs:
11996                 # Get our global settings (we only print stuff if it varies from
11997                 # the current config)
11998                 mydesiredvars = [ 'CHOST', 'CFLAGS', 'CXXFLAGS', 'LDFLAGS' ]
11999                 auxkeys = mydesiredvars + [ "USE", "IUSE"]
12000                 global_vals = {}
12001                 pkgsettings = portage.config(clone=settings)
12002
12003                 for myvar in mydesiredvars:
12004                         global_vals[myvar] = set(settings.get(myvar, "").split())
12005
12006                 # Loop through each package
12007                 # Only print settings if they differ from global settings
12008                 header_title = "Package Settings"
12009                 print header_width * "="
12010                 print header_title.rjust(int(header_width/2 + len(header_title)/2))
12011                 print header_width * "="
12012                 from portage.output import EOutput
12013                 out = EOutput()
12014                 for pkg in mypkgs:
12015                         # Get all package specific variables
12016                         auxvalues = vardb.aux_get(pkg, auxkeys)
12017                         valuesmap = {}
12018                         for i in xrange(len(auxkeys)):
12019                                 valuesmap[auxkeys[i]] = set(auxvalues[i].split())
12020                         diff_values = {}
12021                         for myvar in mydesiredvars:
12022                                 # If the package variable doesn't match the
12023                                 # current global variable, something has changed
12024                                 # so set diff_found so we know to print
12025                                 if valuesmap[myvar] != global_vals[myvar]:
12026                                         diff_values[myvar] = valuesmap[myvar]
12027                         valuesmap["IUSE"] = set(filter_iuse_defaults(valuesmap["IUSE"]))
12028                         valuesmap["USE"] = valuesmap["USE"].intersection(valuesmap["IUSE"])
12029                         pkgsettings.reset()
12030                         # If a matching ebuild is no longer available in the tree, maybe it
12031                         # would make sense to compare against the flags for the best
12032                         # available version with the same slot?
12033                         mydb = None
12034                         if portdb.cpv_exists(pkg):
12035                                 mydb = portdb
12036                         pkgsettings.setcpv(pkg, mydb=mydb)
12037                         if valuesmap["IUSE"].intersection(
12038                                 pkgsettings["PORTAGE_USE"].split()) != valuesmap["USE"]:
12039                                 diff_values["USE"] = valuesmap["USE"]
12040                         # If a difference was found, print the info for
12041                         # this package.
12042                         if diff_values:
12043                                 # Print package info
12044                                 print "%s was built with the following:" % pkg
12045                                 for myvar in mydesiredvars + ["USE"]:
12046                                         if myvar in diff_values:
12047                                                 mylist = list(diff_values[myvar])
12048                                                 mylist.sort()
12049                                                 print "%s=\"%s\"" % (myvar, " ".join(mylist))
12050                                 print
12051                         print ">>> Attempting to run pkg_info() for '%s'" % pkg
12052                         ebuildpath = vardb.findname(pkg)
12053                         if not ebuildpath or not os.path.exists(ebuildpath):
12054                                 out.ewarn("No ebuild found for '%s'" % pkg)
12055                                 continue
12056                         portage.doebuild(ebuildpath, "info", pkgsettings["ROOT"],
12057                                 pkgsettings, debug=(settings.get("PORTAGE_DEBUG", "") == 1),
12058                                 mydbapi=trees[settings["ROOT"]]["vartree"].dbapi,
12059                                 tree="vartree")
12060
12061 def action_search(root_config, myopts, myfiles, spinner):
12062         if not myfiles:
12063                 print "emerge: no search terms provided."
12064         else:
12065                 searchinstance = search(root_config,
12066                         spinner, "--searchdesc" in myopts,
12067                         "--quiet" not in myopts, "--usepkg" in myopts,
12068                         "--usepkgonly" in myopts)
12069                 for mysearch in myfiles:
12070                         try:
12071                                 searchinstance.execute(mysearch)
12072                         except re.error, comment:
12073                                 print "\n!!! Regular expression error in \"%s\": %s" % ( mysearch, comment )
12074                                 sys.exit(1)
12075                         searchinstance.output()
12076
12077 def action_depclean(settings, trees, ldpath_mtimes,
12078         myopts, action, myfiles, spinner):
12079         # Kill packages that aren't explicitly merged or are required as a
12080         # dependency of another package. World file is explicit.
12081
12082         # Global depclean or prune operations are not very safe when there are
12083         # missing dependencies since it's unknown how badly incomplete
12084         # the dependency graph is, and we might accidentally remove packages
12085         # that should have been pulled into the graph. On the other hand, it's
12086         # relatively safe to ignore missing deps when only asked to remove
12087         # specific packages.
12088         allow_missing_deps = len(myfiles) > 0
12089
12090         msg = []
12091         msg.append("Depclean may break link level dependencies.  Thus, it is\n")
12092         msg.append("recommended to use a tool such as " + good("`revdep-rebuild`") + " (from\n")
12093         msg.append("app-portage/gentoolkit) in order to detect such breakage.\n")
12094         msg.append("\n")
12095         msg.append("Always study the list of packages to be cleaned for any obvious\n")
12096         msg.append("mistakes. Packages that are part of the world set will always\n")
12097         msg.append("be kept.  They can be manually added to this set with\n")
12098         msg.append(good("`emerge --noreplace <atom>`") + ".  Packages that are listed in\n")
12099         msg.append("package.provided (see portage(5)) will be removed by\n")
12100         msg.append("depclean, even if they are part of the world set.\n")
12101         msg.append("\n")
12102         msg.append("As a safety measure, depclean will not remove any packages\n")
12103         msg.append("unless *all* required dependencies have been resolved.  As a\n")
12104         msg.append("consequence, it is often necessary to run %s\n" % \
12105                 good("`emerge --update"))
12106         msg.append(good("--newuse --deep world`") + \
12107                 " prior to depclean.\n")
12108
12109         if action == "depclean" and "--quiet" not in myopts and not myfiles:
12110                 portage.writemsg_stdout("\n")
12111                 for x in msg:
12112                         portage.writemsg_stdout(colorize("WARN", " * ") + x)
12113
12114         xterm_titles = "notitles" not in settings.features
12115         myroot = settings["ROOT"]
12116         root_config = trees[myroot]["root_config"]
12117         getSetAtoms = root_config.setconfig.getSetAtoms
12118         vardb = trees[myroot]["vartree"].dbapi
12119
12120         required_set_names = ("system", "world")
12121         required_sets = {}
12122         set_args = []
12123
12124         for s in required_set_names:
12125                 required_sets[s] = InternalPackageSet(
12126                         initial_atoms=getSetAtoms(s))
12127
12128         
12129         # When removing packages, use a temporary version of world
12130         # which excludes packages that are intended to be eligible for
12131         # removal.
12132         world_temp_set = required_sets["world"]
12133         system_set = required_sets["system"]
12134
12135         if not system_set or not world_temp_set:
12136
12137                 if not system_set:
12138                         writemsg_level("!!! You have no system list.\n",
12139                                 level=logging.ERROR, noiselevel=-1)
12140
12141                 if not world_temp_set:
12142                         writemsg_level("!!! You have no world file.\n",
12143                                         level=logging.WARNING, noiselevel=-1)
12144
12145                 writemsg_level("!!! Proceeding is likely to " + \
12146                         "break your installation.\n",
12147                         level=logging.WARNING, noiselevel=-1)
12148                 if "--pretend" not in myopts:
12149                         countdown(int(settings["EMERGE_WARNING_DELAY"]), ">>> Depclean")
12150
12151         if action == "depclean":
12152                 emergelog(xterm_titles, " >>> depclean")
12153
12154         import textwrap
12155         args_set = InternalPackageSet()
12156         if myfiles:
12157                 for x in myfiles:
12158                         if not is_valid_package_atom(x):
12159                                 writemsg_level("!!! '%s' is not a valid package atom.\n" % x,
12160                                         level=logging.ERROR, noiselevel=-1)
12161                                 writemsg_level("!!! Please check ebuild(5) for full details.\n")
12162                                 return
12163                         try:
12164                                 atom = portage.dep_expand(x, mydb=vardb, settings=settings)
12165                         except portage.exception.AmbiguousPackageName, e:
12166                                 msg = "The short ebuild name \"" + x + \
12167                                         "\" is ambiguous.  Please specify " + \
12168                                         "one of the following " + \
12169                                         "fully-qualified ebuild names instead:"
12170                                 for line in textwrap.wrap(msg, 70):
12171                                         writemsg_level("!!! %s\n" % (line,),
12172                                                 level=logging.ERROR, noiselevel=-1)
12173                                 for i in e[0]:
12174                                         writemsg_level("    %s\n" % colorize("INFORM", i),
12175                                                 level=logging.ERROR, noiselevel=-1)
12176                                 writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
12177                                 return
12178                         args_set.add(atom)
12179                 matched_packages = False
12180                 for x in args_set:
12181                         if vardb.match(x):
12182                                 matched_packages = True
12183                                 break
12184                 if not matched_packages:
12185                         writemsg_level(">>> No packages selected for removal by %s\n" % \
12186                                 action)
12187                         return
12188
12189         writemsg_level("\nCalculating dependencies  ")
12190         resolver_params = create_depgraph_params(myopts, "remove")
12191         resolver = depgraph(settings, trees, myopts, resolver_params, spinner)
12192         vardb = resolver.trees[myroot]["vartree"].dbapi
12193
12194         if action == "depclean":
12195
12196                 if args_set:
12197                         # Pull in everything that's installed but not matched
12198                         # by an argument atom since we don't want to clean any
12199                         # package if something depends on it.
12200
12201                         world_temp_set.clear()
12202                         for pkg in vardb:
12203                                 spinner.update()
12204
12205                                 try:
12206                                         if args_set.findAtomForPackage(pkg) is None:
12207                                                 world_temp_set.add("=" + pkg.cpv)
12208                                                 continue
12209                                 except portage.exception.InvalidDependString, e:
12210                                         show_invalid_depstring_notice(pkg,
12211                                                 pkg.metadata["PROVIDE"], str(e))
12212                                         del e
12213                                         world_temp_set.add("=" + pkg.cpv)
12214                                         continue
12215
12216         elif action == "prune":
12217
12218                 # Pull in everything that's installed since we don't
12219                 # to prune a package if something depends on it.
12220                 world_temp_set.clear()
12221                 world_temp_set.update(vardb.cp_all())
12222
12223                 if not args_set:
12224
12225                         # Try to prune everything that's slotted.
12226                         for cp in vardb.cp_all():
12227                                 if len(vardb.cp_list(cp)) > 1:
12228                                         args_set.add(cp)
12229
12230                 # Remove atoms from world that match installed packages
12231                 # that are also matched by argument atoms, but do not remove
12232                 # them if they match the highest installed version.
12233                 for pkg in vardb:
12234                         spinner.update()
12235                         pkgs_for_cp = vardb.match_pkgs(pkg.cp)
12236                         if not pkgs_for_cp or pkg not in pkgs_for_cp:
12237                                 raise AssertionError("package expected in matches: " + \
12238                                         "cp = %s, cpv = %s matches = %s" % \
12239                                         (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
12240
12241                         highest_version = pkgs_for_cp[-1]
12242                         if pkg == highest_version:
12243                                 # pkg is the highest version
12244                                 world_temp_set.add("=" + pkg.cpv)
12245                                 continue
12246
12247                         if len(pkgs_for_cp) <= 1:
12248                                 raise AssertionError("more packages expected: " + \
12249                                         "cp = %s, cpv = %s matches = %s" % \
12250                                         (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
12251
12252                         try:
12253                                 if args_set.findAtomForPackage(pkg) is None:
12254                                         world_temp_set.add("=" + pkg.cpv)
12255                                         continue
12256                         except portage.exception.InvalidDependString, e:
12257                                 show_invalid_depstring_notice(pkg,
12258                                         pkg.metadata["PROVIDE"], str(e))
12259                                 del e
12260                                 world_temp_set.add("=" + pkg.cpv)
12261                                 continue
12262
12263         set_args = {}
12264         for s, package_set in required_sets.iteritems():
12265                 set_atom = SETPREFIX + s
12266                 set_arg = SetArg(arg=set_atom, set=package_set,
12267                         root_config=resolver.roots[myroot])
12268                 set_args[s] = set_arg
12269                 for atom in set_arg.set:
12270                         resolver._dep_stack.append(
12271                                 Dependency(atom=atom, root=myroot, parent=set_arg))
12272                         resolver.digraph.add(set_arg, None)
12273
12274         success = resolver._complete_graph()
12275         writemsg_level("\b\b... done!\n")
12276
12277         resolver.display_problems()
12278
12279         if not success:
12280                 return 1
12281
12282         def unresolved_deps():
12283
12284                 unresolvable = set()
12285                 for dep in resolver._initially_unsatisfied_deps:
12286                         if isinstance(dep.parent, Package) and \
12287                                 (dep.priority > UnmergeDepPriority.SOFT):
12288                                 unresolvable.add((dep.atom, dep.parent.cpv))
12289
12290                 if not unresolvable:
12291                         return False
12292
12293                 if unresolvable and not allow_missing_deps:
12294                         prefix = bad(" * ")
12295                         msg = []
12296                         msg.append("Dependencies could not be completely resolved due to")
12297                         msg.append("the following required packages not being installed:")
12298                         msg.append("")
12299                         for atom, parent in unresolvable:
12300                                 msg.append("  %s pulled in by:" % (atom,))
12301                                 msg.append("    %s" % (parent,))
12302                                 msg.append("")
12303                         msg.append("Have you forgotten to run " + \
12304                                 good("`emerge --update --newuse --deep world`") + " prior to")
12305                         msg.append(("%s?  It may be necessary to manually " + \
12306                                 "uninstall packages that no longer") % action)
12307                         msg.append("exist in the portage tree since " + \
12308                                 "it may not be possible to satisfy their")
12309                         msg.append("dependencies.  Also, be aware of " + \
12310                                 "the --with-bdeps option that is documented")
12311                         msg.append("in " + good("`man emerge`") + ".")
12312                         if action == "prune":
12313                                 msg.append("")
12314                                 msg.append("If you would like to ignore " + \
12315                                         "dependencies then use %s." % good("--nodeps"))
12316                         writemsg_level("".join("%s%s\n" % (prefix, line) for line in msg),
12317                                 level=logging.ERROR, noiselevel=-1)
12318                         return True
12319                 return False
12320
12321         if unresolved_deps():
12322                 return 1
12323
12324         graph = resolver.digraph.copy()
12325         required_pkgs_total = 0
12326         for node in graph:
12327                 if isinstance(node, Package):
12328                         required_pkgs_total += 1
12329
12330         def show_parents(child_node):
12331                 parent_nodes = graph.parent_nodes(child_node)
12332                 if not parent_nodes:
12333                         # With --prune, the highest version can be pulled in without any
12334                         # real parent since all installed packages are pulled in.  In that
12335                         # case there's nothing to show here.
12336                         return
12337                 parent_strs = []
12338                 for node in parent_nodes:
12339                         parent_strs.append(str(getattr(node, "cpv", node)))
12340                 parent_strs.sort()
12341                 msg = []
12342                 msg.append("  %s pulled in by:\n" % (child_node.cpv,))
12343                 for parent_str in parent_strs:
12344                         msg.append("    %s\n" % (parent_str,))
12345                 msg.append("\n")
12346                 portage.writemsg_stdout("".join(msg), noiselevel=-1)
12347
12348         def create_cleanlist():
12349                 pkgs_to_remove = []
12350
12351                 if action == "depclean":
12352                         if args_set:
12353
12354                                 for pkg in vardb:
12355                                         arg_atom = None
12356                                         try:
12357                                                 arg_atom = args_set.findAtomForPackage(pkg)
12358                                         except portage.exception.InvalidDependString:
12359                                                 # this error has already been displayed by now
12360                                                 continue
12361
12362                                         if arg_atom:
12363                                                 if pkg not in graph:
12364                                                         pkgs_to_remove.append(pkg)
12365                                                 elif "--verbose" in myopts:
12366                                                         show_parents(pkg)
12367
12368                         else:
12369                                 for pkg in vardb:
12370                                         if pkg not in graph:
12371                                                 pkgs_to_remove.append(pkg)
12372                                         elif "--verbose" in myopts:
12373                                                 show_parents(pkg)
12374
12375                 elif action == "prune":
12376                         # Prune really uses all installed instead of world. It's not
12377                         # a real reverse dependency so don't display it as such.
12378                         graph.remove(set_args["world"])
12379
12380                         for atom in args_set:
12381                                 for pkg in vardb.match_pkgs(atom):
12382                                         if pkg not in graph:
12383                                                 pkgs_to_remove.append(pkg)
12384                                         elif "--verbose" in myopts:
12385                                                 show_parents(pkg)
12386
12387                 if not pkgs_to_remove:
12388                         writemsg_level(
12389                                 ">>> No packages selected for removal by %s\n" % action)
12390                         if "--verbose" not in myopts:
12391                                 writemsg_level(
12392                                         ">>> To see reverse dependencies, use %s\n" % \
12393                                                 good("--verbose"))
12394                         if action == "prune":
12395                                 writemsg_level(
12396                                         ">>> To ignore dependencies, use %s\n" % \
12397                                                 good("--nodeps"))
12398
12399                 return pkgs_to_remove
12400
12401         cleanlist = create_cleanlist()
12402
12403         if len(cleanlist):
12404                 clean_set = set(cleanlist)
12405
12406                 # Use a topological sort to create an unmerge order such that
12407                 # each package is unmerged before it's dependencies. This is
12408                 # necessary to avoid breaking things that may need to run
12409                 # during pkg_prerm or pkg_postrm phases.
12410
12411                 # Create a new graph to account for dependencies between the
12412                 # packages being unmerged.
12413                 graph = digraph()
12414                 del cleanlist[:]
12415
12416                 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
12417                 runtime = UnmergeDepPriority(runtime=True)
12418                 runtime_post = UnmergeDepPriority(runtime_post=True)
12419                 buildtime = UnmergeDepPriority(buildtime=True)
12420                 priority_map = {
12421                         "RDEPEND": runtime,
12422                         "PDEPEND": runtime_post,
12423                         "DEPEND": buildtime,
12424                 }
12425
12426                 for node in clean_set:
12427                         graph.add(node, None)
12428                         mydeps = []
12429                         node_use = node.metadata["USE"].split()
12430                         for dep_type in dep_keys:
12431                                 depstr = node.metadata[dep_type]
12432                                 if not depstr:
12433                                         continue
12434                                 try:
12435                                         portage.dep._dep_check_strict = False
12436                                         success, atoms = portage.dep_check(depstr, None, settings,
12437                                                 myuse=node_use, trees=resolver._graph_trees,
12438                                                 myroot=myroot)
12439                                 finally:
12440                                         portage.dep._dep_check_strict = True
12441                                 if not success:
12442                                         # Ignore invalid deps of packages that will
12443                                         # be uninstalled anyway.
12444                                         continue
12445
12446                                 priority = priority_map[dep_type]
12447                                 for atom in atoms:
12448                                         if not isinstance(atom, portage.dep.Atom):
12449                                                 # Ignore invalid atoms returned from dep_check().
12450                                                 continue
12451                                         if atom.blocker:
12452                                                 continue
12453                                         matches = vardb.match_pkgs(atom)
12454                                         if not matches:
12455                                                 continue
12456                                         for child_node in matches:
12457                                                 if child_node in clean_set:
12458                                                         graph.add(child_node, node, priority=priority)
12459
12460                 ordered = True
12461                 if len(graph.order) == len(graph.root_nodes()):
12462                         # If there are no dependencies between packages
12463                         # let unmerge() group them by cat/pn.
12464                         ordered = False
12465                         cleanlist = [pkg.cpv for pkg in graph.order]
12466                 else:
12467                         # Order nodes from lowest to highest overall reference count for
12468                         # optimal root node selection.
12469                         node_refcounts = {}
12470                         for node in graph.order:
12471                                 node_refcounts[node] = len(graph.parent_nodes(node))
12472                         def cmp_reference_count(node1, node2):
12473                                 return node_refcounts[node1] - node_refcounts[node2]
12474                         graph.order.sort(cmp_reference_count)
12475         
12476                         ignore_priority_range = [None]
12477                         ignore_priority_range.extend(
12478                                 xrange(UnmergeDepPriority.MIN, UnmergeDepPriority.MAX + 1))
12479                         while not graph.empty():
12480                                 for ignore_priority in ignore_priority_range:
12481                                         nodes = graph.root_nodes(ignore_priority=ignore_priority)
12482                                         if nodes:
12483                                                 break
12484                                 if not nodes:
12485                                         raise AssertionError("no root nodes")
12486                                 if ignore_priority is not None:
12487                                         # Some deps have been dropped due to circular dependencies,
12488                                         # so only pop one node in order do minimize the number that
12489                                         # are dropped.
12490                                         del nodes[1:]
12491                                 for node in nodes:
12492                                         graph.remove(node)
12493                                         cleanlist.append(node.cpv)
12494
12495                 unmerge(root_config, myopts, "unmerge", cleanlist,
12496                         ldpath_mtimes, ordered=ordered)
12497
12498         if action == "prune":
12499                 return
12500
12501         if not cleanlist and "--quiet" in myopts:
12502                 return
12503
12504         print "Packages installed:   "+str(len(vardb.cpv_all()))
12505         print "Packages in world:    " + \
12506                 str(len(root_config.sets["world"].getAtoms()))
12507         print "Packages in system:   " + \
12508                 str(len(root_config.sets["system"].getAtoms()))
12509         print "Required packages:    "+str(required_pkgs_total)
12510         if "--pretend" in myopts:
12511                 print "Number to remove:     "+str(len(cleanlist))
12512         else:
12513                 print "Number removed:       "+str(len(cleanlist))
12514
12515 def resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner,
12516         skip_masked=False, skip_unsatisfied=False):
12517         """
12518         Construct a depgraph for the given resume list. This will raise
12519         PackageNotFound or depgraph.UnsatisfiedResumeDep when necessary.
12520         @rtype: tuple
12521         @returns: (success, depgraph, dropped_tasks)
12522         """
12523         mergelist = mtimedb["resume"]["mergelist"]
12524         dropped_tasks = set()
12525         while True:
12526                 mydepgraph = depgraph(settings, trees,
12527                         myopts, myparams, spinner)
12528                 try:
12529                         success = mydepgraph.loadResumeCommand(mtimedb["resume"],
12530                                 skip_masked=skip_masked)
12531                 except depgraph.UnsatisfiedResumeDep, e:
12532                         if not skip_unsatisfied:
12533                                 raise
12534
12535                         graph = mydepgraph.digraph
12536                         unsatisfied_parents = dict((dep.parent, dep.parent) \
12537                                 for dep in e.value)
12538                         traversed_nodes = set()
12539                         unsatisfied_stack = list(unsatisfied_parents)
12540                         while unsatisfied_stack:
12541                                 pkg = unsatisfied_stack.pop()
12542                                 if pkg in traversed_nodes:
12543                                         continue
12544                                 traversed_nodes.add(pkg)
12545
12546                                 # If this package was pulled in by a parent
12547                                 # package scheduled for merge, removing this
12548                                 # package may cause the the parent package's
12549                                 # dependency to become unsatisfied.
12550                                 for parent_node in graph.parent_nodes(pkg):
12551                                         if not isinstance(parent_node, Package) \
12552                                                 or parent_node.operation not in ("merge", "nomerge"):
12553                                                 continue
12554                                         unsatisfied = \
12555                                                 graph.child_nodes(parent_node,
12556                                                 ignore_priority=DepPriority.SOFT)
12557                                         if pkg in unsatisfied:
12558                                                 unsatisfied_parents[parent_node] = parent_node
12559                                                 unsatisfied_stack.append(parent_node)
12560
12561                         pruned_mergelist = [x for x in mergelist \
12562                                 if isinstance(x, list) and \
12563                                 tuple(x) not in unsatisfied_parents]
12564
12565                         # If the mergelist doesn't shrink then this loop is infinite.
12566                         if len(pruned_mergelist) == len(mergelist):
12567                                 # This happens if a package can't be dropped because
12568                                 # it's already installed, but it has unsatisfied PDEPEND.
12569                                 raise
12570                         mergelist[:] = pruned_mergelist
12571
12572                         # Exclude installed packages that have been removed from the graph due
12573                         # to failure to build/install runtime dependencies after the dependent
12574                         # package has already been installed.
12575                         dropped_tasks.update(pkg for pkg in \
12576                                 unsatisfied_parents if pkg.operation != "nomerge")
12577                         mydepgraph.break_refs(unsatisfied_parents)
12578
12579                         del e, graph, traversed_nodes, \
12580                                 unsatisfied_parents, unsatisfied_stack
12581                         continue
12582                 else:
12583                         break
12584         return (success, mydepgraph, dropped_tasks)
12585
12586 def action_build(settings, trees, mtimedb,
12587         myopts, myaction, myfiles, spinner):
12588
12589         # validate the state of the resume data
12590         # so that we can make assumptions later.
12591         for k in ("resume", "resume_backup"):
12592                 if k not in mtimedb:
12593                         continue
12594                 resume_data = mtimedb[k]
12595                 if not isinstance(resume_data, dict):
12596                         del mtimedb[k]
12597                         continue
12598                 mergelist = resume_data.get("mergelist")
12599                 if not isinstance(mergelist, list):
12600                         del mtimedb[k]
12601                         continue
12602                 resume_opts = resume_data.get("myopts")
12603                 if not isinstance(resume_opts, (dict, list)):
12604                         del mtimedb[k]
12605                         continue
12606                 favorites = resume_data.get("favorites")
12607                 if not isinstance(favorites, list):
12608                         del mtimedb[k]
12609                         continue
12610
12611         resume = False
12612         if "--resume" in myopts and \
12613                 ("resume" in mtimedb or
12614                 "resume_backup" in mtimedb):
12615                 resume = True
12616                 if "resume" not in mtimedb:
12617                         mtimedb["resume"] = mtimedb["resume_backup"]
12618                         del mtimedb["resume_backup"]
12619                         mtimedb.commit()
12620                 # "myopts" is a list for backward compatibility.
12621                 resume_opts = mtimedb["resume"].get("myopts", [])
12622                 if isinstance(resume_opts, list):
12623                         resume_opts = dict((k,True) for k in resume_opts)
12624                 for opt in ("--skipfirst", "--ask", "--tree"):
12625                         resume_opts.pop(opt, None)
12626                 myopts.update(resume_opts)
12627
12628                 if "--debug" in myopts:
12629                         writemsg_level("myopts %s\n" % (myopts,))
12630
12631                 # Adjust config according to options of the command being resumed.
12632                 for myroot in trees:
12633                         mysettings =  trees[myroot]["vartree"].settings
12634                         mysettings.unlock()
12635                         adjust_config(myopts, mysettings)
12636                         mysettings.lock()
12637                         del myroot, mysettings
12638
12639         ldpath_mtimes = mtimedb["ldpath"]
12640         favorites=[]
12641         merge_count = 0
12642         buildpkgonly = "--buildpkgonly" in myopts
12643         pretend = "--pretend" in myopts
12644         fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
12645         ask = "--ask" in myopts
12646         nodeps = "--nodeps" in myopts
12647         oneshot = "--oneshot" in myopts or "--onlydeps" in myopts
12648         tree = "--tree" in myopts
12649         if nodeps and tree:
12650                 tree = False
12651                 del myopts["--tree"]
12652                 portage.writemsg(colorize("WARN", " * ") + \
12653                         "--tree is broken with --nodeps. Disabling...\n")
12654         debug = "--debug" in myopts
12655         verbose = "--verbose" in myopts
12656         quiet = "--quiet" in myopts
12657         if pretend or fetchonly:
12658                 # make the mtimedb readonly
12659                 mtimedb.filename = None
12660         if "--digest" in myopts:
12661                 msg = "The --digest option can prevent corruption from being" + \
12662                         " noticed. The `repoman manifest` command is the preferred" + \
12663                         " way to generate manifests and it is capable of doing an" + \
12664                         " entire repository or category at once."
12665                 prefix = bad(" * ")
12666                 writemsg(prefix + "\n")
12667                 from textwrap import wrap
12668                 for line in wrap(msg, 72):
12669                         writemsg("%s%s\n" % (prefix, line))
12670                 writemsg(prefix + "\n")
12671
12672         if "--quiet" not in myopts and \
12673                 ("--pretend" in myopts or "--ask" in myopts or \
12674                 "--tree" in myopts or "--verbose" in myopts):
12675                 action = ""
12676                 if "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
12677                         action = "fetched"
12678                 elif "--buildpkgonly" in myopts:
12679                         action = "built"
12680                 else:
12681                         action = "merged"
12682                 if "--tree" in myopts and action != "fetched": # Tree doesn't work with fetching
12683                         print
12684                         print darkgreen("These are the packages that would be %s, in reverse order:") % action
12685                         print
12686                 else:
12687                         print
12688                         print darkgreen("These are the packages that would be %s, in order:") % action
12689                         print
12690
12691         show_spinner = "--quiet" not in myopts and "--nodeps" not in myopts
12692         if not show_spinner:
12693                 spinner.update = spinner.update_quiet
12694
12695         if resume:
12696                 favorites = mtimedb["resume"].get("favorites")
12697                 if not isinstance(favorites, list):
12698                         favorites = []
12699
12700                 if show_spinner:
12701                         print "Calculating dependencies  ",
12702                 myparams = create_depgraph_params(myopts, myaction)
12703
12704                 resume_data = mtimedb["resume"]
12705                 mergelist = resume_data["mergelist"]
12706                 if mergelist and "--skipfirst" in myopts:
12707                         for i, task in enumerate(mergelist):
12708                                 if isinstance(task, list) and \
12709                                         task and task[-1] == "merge":
12710                                         del mergelist[i]
12711                                         break
12712
12713                 skip_masked      = "--skipfirst" in myopts
12714                 skip_unsatisfied = "--skipfirst" in myopts
12715                 success = False
12716                 mydepgraph = None
12717                 try:
12718                         success, mydepgraph, dropped_tasks = resume_depgraph(
12719                                 settings, trees, mtimedb, myopts, myparams, spinner,
12720                                 skip_masked=skip_masked, skip_unsatisfied=skip_unsatisfied)
12721                 except (portage.exception.PackageNotFound,
12722                         depgraph.UnsatisfiedResumeDep), e:
12723                         if isinstance(e, depgraph.UnsatisfiedResumeDep):
12724                                 mydepgraph = e.depgraph
12725                         if show_spinner:
12726                                 print
12727                         from textwrap import wrap
12728                         from portage.output import EOutput
12729                         out = EOutput()
12730
12731                         resume_data = mtimedb["resume"]
12732                         mergelist = resume_data.get("mergelist")
12733                         if not isinstance(mergelist, list):
12734                                 mergelist = []
12735                         if mergelist and debug or (verbose and not quiet):
12736                                 out.eerror("Invalid resume list:")
12737                                 out.eerror("")
12738                                 indent = "  "
12739                                 for task in mergelist:
12740                                         if isinstance(task, list):
12741                                                 out.eerror(indent + str(tuple(task)))
12742                                 out.eerror("")
12743
12744                         if isinstance(e, depgraph.UnsatisfiedResumeDep):
12745                                 out.eerror("One or more packages are either masked or " + \
12746                                         "have missing dependencies:")
12747                                 out.eerror("")
12748                                 indent = "  "
12749                                 for dep in e.value:
12750                                         if dep.atom is None:
12751                                                 out.eerror(indent + "Masked package:")
12752                                                 out.eerror(2 * indent + str(dep.parent))
12753                                                 out.eerror("")
12754                                         else:
12755                                                 out.eerror(indent + str(dep.atom) + " pulled in by:")
12756                                                 out.eerror(2 * indent + str(dep.parent))
12757                                                 out.eerror("")
12758                                 msg = "The resume list contains packages " + \
12759                                         "that are either masked or have " + \
12760                                         "unsatisfied dependencies. " + \
12761                                         "Please restart/continue " + \
12762                                         "the operation manually, or use --skipfirst " + \
12763                                         "to skip the first package in the list and " + \
12764                                         "any other packages that may be " + \
12765                                         "masked or have missing dependencies."
12766                                 for line in wrap(msg, 72):
12767                                         out.eerror(line)
12768                         elif isinstance(e, portage.exception.PackageNotFound):
12769                                 out.eerror("An expected package is " + \
12770                                         "not available: %s" % str(e))
12771                                 out.eerror("")
12772                                 msg = "The resume list contains one or more " + \
12773                                         "packages that are no longer " + \
12774                                         "available. Please restart/continue " + \
12775                                         "the operation manually."
12776                                 for line in wrap(msg, 72):
12777                                         out.eerror(line)
12778                 else:
12779                         if show_spinner:
12780                                 print "\b\b... done!"
12781
12782                 if success:
12783                         if dropped_tasks:
12784                                 portage.writemsg("!!! One or more packages have been " + \
12785                                         "dropped due to\n" + \
12786                                         "!!! masking or unsatisfied dependencies:\n\n",
12787                                         noiselevel=-1)
12788                                 for task in dropped_tasks:
12789                                         portage.writemsg("  " + str(task) + "\n", noiselevel=-1)
12790                                 portage.writemsg("\n", noiselevel=-1)
12791                         del dropped_tasks
12792                 else:
12793                         if mydepgraph is not None:
12794                                 mydepgraph.display_problems()
12795                         if not (ask or pretend):
12796                                 # delete the current list and also the backup
12797                                 # since it's probably stale too.
12798                                 for k in ("resume", "resume_backup"):
12799                                         mtimedb.pop(k, None)
12800                                 mtimedb.commit()
12801
12802                         return 1
12803         else:
12804                 if ("--resume" in myopts):
12805                         print darkgreen("emerge: It seems we have nothing to resume...")
12806                         return os.EX_OK
12807
12808                 myparams = create_depgraph_params(myopts, myaction)
12809                 if "--quiet" not in myopts and "--nodeps" not in myopts:
12810                         print "Calculating dependencies  ",
12811                         sys.stdout.flush()
12812                 mydepgraph = depgraph(settings, trees, myopts, myparams, spinner)
12813                 try:
12814                         retval, favorites = mydepgraph.select_files(myfiles)
12815                 except portage.exception.PackageNotFound, e:
12816                         portage.writemsg("\n!!! %s\n" % str(e), noiselevel=-1)
12817                         return 1
12818                 except portage.exception.PackageSetNotFound, e:
12819                         root_config = trees[settings["ROOT"]]["root_config"]
12820                         display_missing_pkg_set(root_config, e.value)
12821                         return 1
12822                 if show_spinner:
12823                         print "\b\b... done!"
12824                 if not retval:
12825                         mydepgraph.display_problems()
12826                         return 1
12827
12828         if "--pretend" not in myopts and \
12829                 ("--ask" in myopts or "--tree" in myopts or \
12830                 "--verbose" in myopts) and \
12831                 not ("--quiet" in myopts and "--ask" not in myopts):
12832                 if "--resume" in myopts:
12833                         mymergelist = mydepgraph.altlist()
12834                         if len(mymergelist) == 0:
12835                                 print colorize("INFORM", "emerge: It seems we have nothing to resume...")
12836                                 return os.EX_OK
12837                         favorites = mtimedb["resume"]["favorites"]
12838                         retval = mydepgraph.display(
12839                                 mydepgraph.altlist(reversed=tree),
12840                                 favorites=favorites)
12841                         mydepgraph.display_problems()
12842                         if retval != os.EX_OK:
12843                                 return retval
12844                         prompt="Would you like to resume merging these packages?"
12845                 else:
12846                         retval = mydepgraph.display(
12847                                 mydepgraph.altlist(reversed=("--tree" in myopts)),
12848                                 favorites=favorites)
12849                         mydepgraph.display_problems()
12850                         if retval != os.EX_OK:
12851                                 return retval
12852                         mergecount=0
12853                         for x in mydepgraph.altlist():
12854                                 if isinstance(x, Package) and x.operation == "merge":
12855                                         mergecount += 1
12856
12857                         if mergecount==0:
12858                                 sets = trees[settings["ROOT"]]["root_config"].sets
12859                                 world_candidates = None
12860                                 if "--noreplace" in myopts and \
12861                                         not oneshot and favorites:
12862                                         # Sets that are not world candidates are filtered
12863                                         # out here since the favorites list needs to be
12864                                         # complete for depgraph.loadResumeCommand() to
12865                                         # operate correctly.
12866                                         world_candidates = [x for x in favorites \
12867                                                 if not (x.startswith(SETPREFIX) and \
12868                                                 not sets[x[1:]].world_candidate)]
12869                                 if "--noreplace" in myopts and \
12870                                         not oneshot and world_candidates:
12871                                         print
12872                                         for x in world_candidates:
12873                                                 print " %s %s" % (good("*"), x)
12874                                         prompt="Would you like to add these packages to your world favorites?"
12875                                 elif settings["AUTOCLEAN"] and "yes"==settings["AUTOCLEAN"]:
12876                                         prompt="Nothing to merge; would you like to auto-clean packages?"
12877                                 else:
12878                                         print
12879                                         print "Nothing to merge; quitting."
12880                                         print
12881                                         return os.EX_OK
12882                         elif "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
12883                                 prompt="Would you like to fetch the source files for these packages?"
12884                         else:
12885                                 prompt="Would you like to merge these packages?"
12886                 print
12887                 if "--ask" in myopts and userquery(prompt) == "No":
12888                         print
12889                         print "Quitting."
12890                         print
12891                         return os.EX_OK
12892                 # Don't ask again (e.g. when auto-cleaning packages after merge)
12893                 myopts.pop("--ask", None)
12894
12895         if ("--pretend" in myopts) and not ("--fetchonly" in myopts or "--fetch-all-uri" in myopts):
12896                 if ("--resume" in myopts):
12897                         mymergelist = mydepgraph.altlist()
12898                         if len(mymergelist) == 0:
12899                                 print colorize("INFORM", "emerge: It seems we have nothing to resume...")
12900                                 return os.EX_OK
12901                         favorites = mtimedb["resume"]["favorites"]
12902                         retval = mydepgraph.display(
12903                                 mydepgraph.altlist(reversed=tree),
12904                                 favorites=favorites)
12905                         mydepgraph.display_problems()
12906                         if retval != os.EX_OK:
12907                                 return retval
12908                 else:
12909                         retval = mydepgraph.display(
12910                                 mydepgraph.altlist(reversed=("--tree" in myopts)),
12911                                 favorites=favorites)
12912                         mydepgraph.display_problems()
12913                         if retval != os.EX_OK:
12914                                 return retval
12915                         if "--buildpkgonly" in myopts:
12916                                 graph_copy = mydepgraph.digraph.clone()
12917                                 for node in list(graph_copy.order):
12918                                         if not isinstance(node, Package):
12919                                                 graph_copy.remove(node)
12920                                 if not graph_copy.hasallzeros(ignore_priority=DepPriority.MEDIUM):
12921                                         print "\n!!! --buildpkgonly requires all dependencies to be merged."
12922                                         print "!!! You have to merge the dependencies before you can build this package.\n"
12923                                         return 1
12924         else:
12925                 if "--buildpkgonly" in myopts:
12926                         graph_copy = mydepgraph.digraph.clone()
12927                         for node in list(graph_copy.order):
12928                                 if not isinstance(node, Package):
12929                                         graph_copy.remove(node)
12930                         if not graph_copy.hasallzeros(ignore_priority=DepPriority.MEDIUM):
12931                                 print "\n!!! --buildpkgonly requires all dependencies to be merged."
12932                                 print "!!! Cannot merge requested packages. Merge deps and try again.\n"
12933                                 return 1
12934
12935                 if ("--resume" in myopts):
12936                         favorites=mtimedb["resume"]["favorites"]
12937                         mymergelist = mydepgraph.altlist()
12938                         mydepgraph.break_refs(mymergelist)
12939                         mergetask = Scheduler(settings, trees, mtimedb, myopts,
12940                                 spinner, mymergelist, favorites, mydepgraph.schedulerGraph())
12941                         del mydepgraph, mymergelist
12942                         clear_caches(trees)
12943
12944                         retval = mergetask.merge()
12945                         merge_count = mergetask.curval
12946                 else:
12947                         if "resume" in mtimedb and \
12948                         "mergelist" in mtimedb["resume"] and \
12949                         len(mtimedb["resume"]["mergelist"]) > 1:
12950                                 mtimedb["resume_backup"] = mtimedb["resume"]
12951                                 del mtimedb["resume"]
12952                                 mtimedb.commit()
12953                         mtimedb["resume"]={}
12954                         # Stored as a dict starting with portage-2.1.6_rc1, and supported
12955                         # by >=portage-2.1.3_rc8. Versions <portage-2.1.3_rc8 only support
12956                         # a list type for options.
12957                         mtimedb["resume"]["myopts"] = myopts.copy()
12958
12959                         # Convert Atom instances to plain str since the mtimedb loader
12960                         # sets unpickler.find_global = None which causes unpickler.load()
12961                         # to raise the following exception:
12962                         #
12963                         # cPickle.UnpicklingError: Global and instance pickles are not supported.
12964                         #
12965                         # TODO: Maybe stop setting find_global = None, or find some other
12966                         # way to avoid accidental triggering of the above UnpicklingError.
12967                         mtimedb["resume"]["favorites"] = [str(x) for x in favorites]
12968
12969                         if ("--digest" in myopts) and not ("--fetchonly" in myopts or "--fetch-all-uri" in myopts):
12970                                 for pkgline in mydepgraph.altlist():
12971                                         if pkgline[0]=="ebuild" and pkgline[3]=="merge":
12972                                                 y = trees[pkgline[1]]["porttree"].dbapi.findname(pkgline[2])
12973                                                 tmpsettings = portage.config(clone=settings)
12974                                                 edebug = 0
12975                                                 if settings.get("PORTAGE_DEBUG", "") == "1":
12976                                                         edebug = 1
12977                                                 retval = portage.doebuild(
12978                                                         y, "digest", settings["ROOT"], tmpsettings, edebug,
12979                                                         ("--pretend" in myopts),
12980                                                         mydbapi=trees[pkgline[1]]["porttree"].dbapi,
12981                                                         tree="porttree")
12982
12983                         pkglist = mydepgraph.altlist()
12984                         mydepgraph.saveNomergeFavorites()
12985                         mydepgraph.break_refs(pkglist)
12986                         mergetask = Scheduler(settings, trees, mtimedb, myopts,
12987                                 spinner, pkglist, favorites, mydepgraph.schedulerGraph())
12988                         del mydepgraph, pkglist
12989                         clear_caches(trees)
12990
12991                         retval = mergetask.merge()
12992                         merge_count = mergetask.curval
12993
12994                 if retval == os.EX_OK and not (buildpkgonly or fetchonly or pretend):
12995                         if "yes" == settings.get("AUTOCLEAN"):
12996                                 portage.writemsg_stdout(">>> Auto-cleaning packages...\n")
12997                                 unmerge(trees[settings["ROOT"]]["root_config"],
12998                                         myopts, "clean", [],
12999                                         ldpath_mtimes, autoclean=1)
13000                         else:
13001                                 portage.writemsg_stdout(colorize("WARN", "WARNING:")
13002                                         + " AUTOCLEAN is disabled.  This can cause serious"
13003                                         + " problems due to overlapping packages.\n")
13004
13005                 return retval
13006
13007 def multiple_actions(action1, action2):
13008         sys.stderr.write("\n!!! Multiple actions requested... Please choose one only.\n")
13009         sys.stderr.write("!!! '%s' or '%s'\n\n" % (action1, action2))
13010         sys.exit(1)
13011
13012 def insert_optional_args(args):
13013         """
13014         Parse optional arguments and insert a value if one has
13015         not been provided. This is done before feeding the args
13016         to the optparse parser since that parser does not support
13017         this feature natively.
13018         """
13019
13020         new_args = []
13021         jobs_opts = ("-j", "--jobs")
13022         arg_stack = args[:]
13023         arg_stack.reverse()
13024         while arg_stack:
13025                 arg = arg_stack.pop()
13026
13027                 short_job_opt = bool("j" in arg and arg[:1] == "-" and arg[:2] != "--")
13028                 if not (short_job_opt or arg in jobs_opts):
13029                         new_args.append(arg)
13030                         continue
13031
13032                 # Insert an empty placeholder in order to
13033                 # satisfy the requirements of optparse.
13034
13035                 new_args.append("--jobs")
13036                 job_count = None
13037                 saved_opts = None
13038                 if short_job_opt and len(arg) > 2:
13039                         if arg[:2] == "-j":
13040                                 try:
13041                                         job_count = int(arg[2:])
13042                                 except ValueError:
13043                                         saved_opts = arg[2:]
13044                         else:
13045                                 job_count = "True"
13046                                 saved_opts = arg[1:].replace("j", "")
13047
13048                 if job_count is None and arg_stack:
13049                         try:
13050                                 job_count = int(arg_stack[-1])
13051                         except ValueError:
13052                                 pass
13053                         else:
13054                                 # Discard the job count from the stack
13055                                 # since we're consuming it here.
13056                                 arg_stack.pop()
13057
13058                 if job_count is None:
13059                         # unlimited number of jobs
13060                         new_args.append("True")
13061                 else:
13062                         new_args.append(str(job_count))
13063
13064                 if saved_opts is not None:
13065                         new_args.append("-" + saved_opts)
13066
13067         return new_args
13068
13069 def parse_opts(tmpcmdline, silent=False):
13070         myaction=None
13071         myopts = {}
13072         myfiles=[]
13073
13074         global actions, options, shortmapping
13075
13076         longopt_aliases = {"--cols":"--columns", "--skip-first":"--skipfirst"}
13077         argument_options = {
13078                 "--config-root": {
13079                         "help":"specify the location for portage configuration files",
13080                         "action":"store"
13081                 },
13082                 "--color": {
13083                         "help":"enable or disable color output",
13084                         "type":"choice",
13085                         "choices":("y", "n")
13086                 },
13087
13088                 "--jobs": {
13089
13090                         "help"   : "Specifies the number of packages to build " + \
13091                                 "simultaneously.",
13092
13093                         "action" : "store"
13094                 },
13095
13096                 "--load-average": {
13097
13098                         "help"   :"Specifies that no new builds should be started " + \
13099                                 "if there are other builds running and the load average " + \
13100                                 "is at least LOAD (a floating-point number).",
13101
13102                         "action" : "store"
13103                 },
13104
13105                 "--with-bdeps": {
13106                         "help":"include unnecessary build time dependencies",
13107                         "type":"choice",
13108                         "choices":("y", "n")
13109                 },
13110                 "--reinstall": {
13111                         "help":"specify conditions to trigger package reinstallation",
13112                         "type":"choice",
13113                         "choices":["changed-use"]
13114                 }
13115         }
13116
13117         from optparse import OptionParser
13118         parser = OptionParser()
13119         if parser.has_option("--help"):
13120                 parser.remove_option("--help")
13121
13122         for action_opt in actions:
13123                 parser.add_option("--" + action_opt, action="store_true",
13124                         dest=action_opt.replace("-", "_"), default=False)
13125         for myopt in options:
13126                 parser.add_option(myopt, action="store_true",
13127                         dest=myopt.lstrip("--").replace("-", "_"), default=False)
13128         for shortopt, longopt in shortmapping.iteritems():
13129                 parser.add_option("-" + shortopt, action="store_true",
13130                         dest=longopt.lstrip("--").replace("-", "_"), default=False)
13131         for myalias, myopt in longopt_aliases.iteritems():
13132                 parser.add_option(myalias, action="store_true",
13133                         dest=myopt.lstrip("--").replace("-", "_"), default=False)
13134
13135         for myopt, kwargs in argument_options.iteritems():
13136                 parser.add_option(myopt,
13137                         dest=myopt.lstrip("--").replace("-", "_"), **kwargs)
13138
13139         tmpcmdline = insert_optional_args(tmpcmdline)
13140
13141         myoptions, myargs = parser.parse_args(args=tmpcmdline)
13142
13143         if myoptions.jobs:
13144                 jobs = None
13145                 if myoptions.jobs == "True":
13146                         jobs = True
13147                 else:
13148                         try:
13149                                 jobs = int(myoptions.jobs)
13150                         except ValueError:
13151                                 jobs = -1
13152
13153                 if jobs is not True and \
13154                         jobs < 1:
13155                         jobs = None
13156                         if not silent:
13157                                 writemsg("!!! Invalid --jobs parameter: '%s'\n" % \
13158                                         (myoptions.jobs,), noiselevel=-1)
13159
13160                 myoptions.jobs = jobs
13161
13162         if myoptions.load_average:
13163                 try:
13164                         load_average = float(myoptions.load_average)
13165                 except ValueError:
13166                         load_average = 0.0
13167
13168                 if load_average <= 0.0:
13169                         load_average = None
13170                         if not silent:
13171                                 writemsg("!!! Invalid --load-average parameter: '%s'\n" % \
13172                                         (myoptions.load_average,), noiselevel=-1)
13173
13174                 myoptions.load_average = load_average
13175
13176         for myopt in options:
13177                 v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"))
13178                 if v:
13179                         myopts[myopt] = True
13180
13181         for myopt in argument_options:
13182                 v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"), None)
13183                 if v is not None:
13184                         myopts[myopt] = v
13185
13186         for action_opt in actions:
13187                 v = getattr(myoptions, action_opt.replace("-", "_"))
13188                 if v:
13189                         if myaction:
13190                                 multiple_actions(myaction, action_opt)
13191                                 sys.exit(1)
13192                         myaction = action_opt
13193
13194         myfiles += myargs
13195
13196         return myaction, myopts, myfiles
13197
13198 def validate_ebuild_environment(trees):
13199         for myroot in trees:
13200                 settings = trees[myroot]["vartree"].settings
13201                 settings.validate()
13202
13203 def clear_caches(trees):
13204         for d in trees.itervalues():
13205                 d["porttree"].dbapi.melt()
13206                 d["porttree"].dbapi._aux_cache.clear()
13207                 d["bintree"].dbapi._aux_cache.clear()
13208                 d["bintree"].dbapi._clear_cache()
13209         portage.dircache.clear()
13210         gc.collect()
13211
13212 def load_emerge_config(trees=None):
13213         kwargs = {}
13214         for k, envvar in (("config_root", "PORTAGE_CONFIGROOT"), ("target_root", "ROOT")):
13215                 v = os.environ.get(envvar, None)
13216                 if v and v.strip():
13217                         kwargs[k] = v
13218         trees = portage.create_trees(trees=trees, **kwargs)
13219
13220         for root, root_trees in trees.iteritems():
13221                 settings = root_trees["vartree"].settings
13222                 setconfig = load_default_config(settings, root_trees)
13223                 root_trees["root_config"] = RootConfig(settings, root_trees, setconfig)
13224
13225         settings = trees["/"]["vartree"].settings
13226
13227         for myroot in trees:
13228                 if myroot != "/":
13229                         settings = trees[myroot]["vartree"].settings
13230                         break
13231
13232         mtimedbfile = os.path.join("/", portage.CACHE_PATH.lstrip(os.path.sep), "mtimedb")
13233         mtimedb = portage.MtimeDB(mtimedbfile)
13234         
13235         return settings, trees, mtimedb
13236
13237 def adjust_config(myopts, settings):
13238         """Make emerge specific adjustments to the config."""
13239
13240         # To enhance usability, make some vars case insensitive by forcing them to
13241         # lower case.
13242         for myvar in ("AUTOCLEAN", "NOCOLOR"):
13243                 if myvar in settings:
13244                         settings[myvar] = settings[myvar].lower()
13245                         settings.backup_changes(myvar)
13246         del myvar
13247
13248         # Kill noauto as it will break merges otherwise.
13249         if "noauto" in settings.features:
13250                 while "noauto" in settings.features:
13251                         settings.features.remove("noauto")
13252                 settings["FEATURES"] = " ".join(settings.features)
13253                 settings.backup_changes("FEATURES")
13254
13255         CLEAN_DELAY = 5
13256         try:
13257                 CLEAN_DELAY = int(settings.get("CLEAN_DELAY", str(CLEAN_DELAY)))
13258         except ValueError, e:
13259                 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
13260                 portage.writemsg("!!! Unable to parse integer: CLEAN_DELAY='%s'\n" % \
13261                         settings["CLEAN_DELAY"], noiselevel=-1)
13262         settings["CLEAN_DELAY"] = str(CLEAN_DELAY)
13263         settings.backup_changes("CLEAN_DELAY")
13264
13265         EMERGE_WARNING_DELAY = 10
13266         try:
13267                 EMERGE_WARNING_DELAY = int(settings.get(
13268                         "EMERGE_WARNING_DELAY", str(EMERGE_WARNING_DELAY)))
13269         except ValueError, e:
13270                 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
13271                 portage.writemsg("!!! Unable to parse integer: EMERGE_WARNING_DELAY='%s'\n" % \
13272                         settings["EMERGE_WARNING_DELAY"], noiselevel=-1)
13273         settings["EMERGE_WARNING_DELAY"] = str(EMERGE_WARNING_DELAY)
13274         settings.backup_changes("EMERGE_WARNING_DELAY")
13275
13276         if "--quiet" in myopts:
13277                 settings["PORTAGE_QUIET"]="1"
13278                 settings.backup_changes("PORTAGE_QUIET")
13279
13280         if "--verbose" in myopts:
13281                 settings["PORTAGE_VERBOSE"] = "1"
13282                 settings.backup_changes("PORTAGE_VERBOSE")
13283
13284         # Set so that configs will be merged regardless of remembered status
13285         if ("--noconfmem" in myopts):
13286                 settings["NOCONFMEM"]="1"
13287                 settings.backup_changes("NOCONFMEM")
13288
13289         # Set various debug markers... They should be merged somehow.
13290         PORTAGE_DEBUG = 0
13291         try:
13292                 PORTAGE_DEBUG = int(settings.get("PORTAGE_DEBUG", str(PORTAGE_DEBUG)))
13293                 if PORTAGE_DEBUG not in (0, 1):
13294                         portage.writemsg("!!! Invalid value: PORTAGE_DEBUG='%i'\n" % \
13295                                 PORTAGE_DEBUG, noiselevel=-1)
13296                         portage.writemsg("!!! PORTAGE_DEBUG must be either 0 or 1\n",
13297                                 noiselevel=-1)
13298                         PORTAGE_DEBUG = 0
13299         except ValueError, e:
13300                 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
13301                 portage.writemsg("!!! Unable to parse integer: PORTAGE_DEBUG='%s'\n" %\
13302                         settings["PORTAGE_DEBUG"], noiselevel=-1)
13303                 del e
13304         if "--debug" in myopts:
13305                 PORTAGE_DEBUG = 1
13306         settings["PORTAGE_DEBUG"] = str(PORTAGE_DEBUG)
13307         settings.backup_changes("PORTAGE_DEBUG")
13308
13309         if settings.get("NOCOLOR") not in ("yes","true"):
13310                 portage.output.havecolor = 1
13311
13312         """The explicit --color < y | n > option overrides the NOCOLOR environment
13313         variable and stdout auto-detection."""
13314         if "--color" in myopts:
13315                 if "y" == myopts["--color"]:
13316                         portage.output.havecolor = 1
13317                         settings["NOCOLOR"] = "false"
13318                 else:
13319                         portage.output.havecolor = 0
13320                         settings["NOCOLOR"] = "true"
13321                 settings.backup_changes("NOCOLOR")
13322         elif not sys.stdout.isatty() and settings.get("NOCOLOR") != "no":
13323                 portage.output.havecolor = 0
13324                 settings["NOCOLOR"] = "true"
13325                 settings.backup_changes("NOCOLOR")
13326
13327 def apply_priorities(settings):
13328         ionice(settings)
13329         nice(settings)
13330
13331 def nice(settings):
13332         try:
13333                 os.nice(int(settings.get("PORTAGE_NICENESS", "0")))
13334         except (OSError, ValueError), e:
13335                 out = portage.output.EOutput()
13336                 out.eerror("Failed to change nice value to '%s'" % \
13337                         settings["PORTAGE_NICENESS"])
13338                 out.eerror("%s\n" % str(e))
13339
13340 def ionice(settings):
13341
13342         ionice_cmd = settings.get("PORTAGE_IONICE_COMMAND")
13343         if ionice_cmd:
13344                 ionice_cmd = shlex.split(ionice_cmd)
13345         if not ionice_cmd:
13346                 return
13347
13348         from portage.util import varexpand
13349         variables = {"PID" : str(os.getpid())}
13350         cmd = [varexpand(x, mydict=variables) for x in ionice_cmd]
13351
13352         try:
13353                 rval = portage.process.spawn(cmd, env=os.environ)
13354         except portage.exception.CommandNotFound:
13355                 # The OS kernel probably doesn't support ionice,
13356                 # so return silently.
13357                 return
13358
13359         if rval != os.EX_OK:
13360                 out = portage.output.EOutput()
13361                 out.eerror("PORTAGE_IONICE_COMMAND returned %d" % (rval,))
13362                 out.eerror("See the make.conf(5) man page for PORTAGE_IONICE_COMMAND usage instructions.")
13363
13364 def display_missing_pkg_set(root_config, set_name):
13365
13366         msg = []
13367         msg.append(("emerge: There are no sets to satisfy '%s'. " + \
13368                 "The following sets exist:") % \
13369                 colorize("INFORM", set_name))
13370         msg.append("")
13371
13372         for s in sorted(root_config.sets):
13373                 msg.append("    %s" % s)
13374         msg.append("")
13375
13376         writemsg_level("".join("%s\n" % l for l in msg),
13377                 level=logging.ERROR, noiselevel=-1)
13378
13379 def expand_set_arguments(myfiles, myaction, root_config):
13380
13381         if myaction != "search":
13382
13383                 world = False
13384                 system = False
13385
13386                 for x in myfiles:
13387                         if x[:1] == SETPREFIX:
13388                                 msg = []
13389                                 msg.append("'%s' is not a valid package atom." % (x,))
13390                                 msg.append("Please check ebuild(5) for full details.")
13391                                 writemsg_level("".join("!!! %s\n" % line for line in msg),
13392                                         level=logging.ERROR, noiselevel=-1)
13393                                 return (myfiles, 1)
13394                         elif x == "system":
13395                                 system = True
13396                         elif x == "world":
13397                                 world = True
13398
13399                 if myaction is not None:
13400                         if system:
13401                                 multiple_actions("system", myaction)
13402                                 return (myfiles, 1)
13403                         elif world:
13404                                 multiple_actions("world", myaction)
13405                                 return (myfiles, 1)
13406
13407                 if system and world:
13408                         multiple_actions("system", "world")
13409                         return (myfiles, 1)
13410
13411         return (myfiles, os.EX_OK)
13412
13413 def repo_name_check(trees):
13414         missing_repo_names = set()
13415         for root, root_trees in trees.iteritems():
13416                 if "porttree" in root_trees:
13417                         portdb = root_trees["porttree"].dbapi
13418                         missing_repo_names.update(portdb.porttrees)
13419                         repos = portdb.getRepositories()
13420                         for r in repos:
13421                                 missing_repo_names.discard(portdb.getRepositoryPath(r))
13422
13423         if missing_repo_names:
13424                 msg = []
13425                 msg.append("WARNING: One or more repositories " + \
13426                         "have missing repo_name entries:")
13427                 msg.append("")
13428                 for p in missing_repo_names:
13429                         msg.append("\t%s/profiles/repo_name" % (p,))
13430                 msg.append("")
13431                 msg.extend(textwrap.wrap("NOTE: Each repo_name entry " + \
13432                         "should be a plain text file containing a unique " + \
13433                         "name for the repository on the first line.", 70))
13434                 writemsg_level("".join("%s\n" % l for l in msg),
13435                         level=logging.WARNING, noiselevel=-1)
13436
13437         return bool(missing_repo_names)
13438
13439 def config_protect_check(trees):
13440         for root, root_trees in trees.iteritems():
13441                 if not root_trees["root_config"].settings.get("CONFIG_PROTECT"):
13442                         msg = "!!! CONFIG_PROTECT is empty"
13443                         if root != "/":
13444                                 msg += " for '%s'" % root
13445                         writemsg_level(msg, level=logging.WARN, noiselevel=-1)
13446
13447 def ambiguous_package_name(arg, atoms, root_config, spinner, myopts):
13448
13449         if "--quiet" in myopts:
13450                 print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
13451                 print "!!! one of the following fully-qualified ebuild names instead:\n"
13452                 for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
13453                         print "    " + colorize("INFORM", cp)
13454                 return
13455
13456         s = search(root_config, spinner, "--searchdesc" in myopts,
13457                 "--quiet" not in myopts, "--usepkg" in myopts,
13458                 "--usepkgonly" in myopts)
13459         null_cp = portage.dep_getkey(insert_category_into_atom(
13460                 arg, "null"))
13461         cat, atom_pn = portage.catsplit(null_cp)
13462         s.searchkey = atom_pn
13463         for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
13464                 s.addCP(cp)
13465         s.output()
13466         print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
13467         print "!!! one of the above fully-qualified ebuild names instead.\n"
13468
13469 def profile_check(trees, myaction, myopts):
13470         if myaction in ("info", "sync"):
13471                 return os.EX_OK
13472         elif "--version" in myopts or "--help" in myopts:
13473                 return os.EX_OK
13474         for root, root_trees in trees.iteritems():
13475                 if root_trees["root_config"].settings.profiles:
13476                         continue
13477                 # generate some profile related warning messages
13478                 validate_ebuild_environment(trees)
13479                 msg = "If you have just changed your profile configuration, you " + \
13480                         "should revert back to the previous configuration. Due to " + \
13481                         "your current profile being invalid, allowed actions are " + \
13482                         "limited to --help, --info, --sync, and --version."
13483                 writemsg_level("".join("!!! %s\n" % l for l in textwrap.wrap(msg, 70)),
13484                         level=logging.ERROR, noiselevel=-1)
13485                 return 1
13486         return os.EX_OK
13487
13488 def emerge_main():
13489         global portage  # NFC why this is necessary now - genone
13490         portage._disable_legacy_globals()
13491         # Disable color until we're sure that it should be enabled (after
13492         # EMERGE_DEFAULT_OPTS has been parsed).
13493         portage.output.havecolor = 0
13494         # This first pass is just for options that need to be known as early as
13495         # possible, such as --config-root.  They will be parsed again later,
13496         # together with EMERGE_DEFAULT_OPTS (which may vary depending on the
13497         # the value of --config-root).
13498         myaction, myopts, myfiles = parse_opts(sys.argv[1:], silent=True)
13499         if "--debug" in myopts:
13500                 os.environ["PORTAGE_DEBUG"] = "1"
13501         if "--config-root" in myopts:
13502                 os.environ["PORTAGE_CONFIGROOT"] = myopts["--config-root"]
13503
13504         # Portage needs to ensure a sane umask for the files it creates.
13505         os.umask(022)
13506         settings, trees, mtimedb = load_emerge_config()
13507         portdb = trees[settings["ROOT"]]["porttree"].dbapi
13508         rval = profile_check(trees, myaction, myopts)
13509         if rval != os.EX_OK:
13510                 return rval
13511
13512         if portage._global_updates(trees, mtimedb["updates"]):
13513                 mtimedb.commit()
13514                 # Reload the whole config from scratch.
13515                 settings, trees, mtimedb = load_emerge_config(trees=trees)
13516                 portdb = trees[settings["ROOT"]]["porttree"].dbapi
13517
13518         xterm_titles = "notitles" not in settings.features
13519
13520         tmpcmdline = []
13521         if "--ignore-default-opts" not in myopts:
13522                 tmpcmdline.extend(settings["EMERGE_DEFAULT_OPTS"].split())
13523         tmpcmdline.extend(sys.argv[1:])
13524         myaction, myopts, myfiles = parse_opts(tmpcmdline)
13525
13526         if "--digest" in myopts:
13527                 os.environ["FEATURES"] = os.environ.get("FEATURES","") + " digest"
13528                 # Reload the whole config from scratch so that the portdbapi internal
13529                 # config is updated with new FEATURES.
13530                 settings, trees, mtimedb = load_emerge_config(trees=trees)
13531                 portdb = trees[settings["ROOT"]]["porttree"].dbapi
13532
13533         for myroot in trees:
13534                 mysettings =  trees[myroot]["vartree"].settings
13535                 mysettings.unlock()
13536                 adjust_config(myopts, mysettings)
13537                 mysettings["PORTAGE_COUNTER_HASH"] = \
13538                         trees[myroot]["vartree"].dbapi._counter_hash()
13539                 mysettings.backup_changes("PORTAGE_COUNTER_HASH")
13540                 mysettings.lock()
13541                 del myroot, mysettings
13542
13543         apply_priorities(settings)
13544
13545         spinner = stdout_spinner()
13546         if "candy" in settings.features:
13547                 spinner.update = spinner.update_scroll
13548
13549         if "--quiet" not in myopts:
13550                 portage.deprecated_profile_check()
13551                 repo_name_check(trees)
13552                 config_protect_check(trees)
13553
13554         eclasses_overridden = {}
13555         for mytrees in trees.itervalues():
13556                 mydb = mytrees["porttree"].dbapi
13557                 # Freeze the portdbapi for performance (memoize all xmatch results).
13558                 mydb.freeze()
13559                 eclasses_overridden.update(mydb.eclassdb._master_eclasses_overridden)
13560         del mytrees, mydb
13561
13562         if eclasses_overridden and \
13563                 settings.get("PORTAGE_ECLASS_WARNING_ENABLE") != "0":
13564                 prefix = bad(" * ")
13565                 if len(eclasses_overridden) == 1:
13566                         writemsg(prefix + "Overlay eclass overrides " + \
13567                                 "eclass from PORTDIR:\n", noiselevel=-1)
13568                 else:
13569                         writemsg(prefix + "Overlay eclasses override " + \
13570                                 "eclasses from PORTDIR:\n", noiselevel=-1)
13571                 writemsg(prefix + "\n", noiselevel=-1)
13572                 for eclass_name in sorted(eclasses_overridden):
13573                         writemsg(prefix + "  '%s/%s.eclass'\n" % \
13574                                 (eclasses_overridden[eclass_name], eclass_name),
13575                                 noiselevel=-1)
13576                 writemsg(prefix + "\n", noiselevel=-1)
13577                 msg = "It is best to avoid overriding eclasses from PORTDIR " + \
13578                 "because it will trigger invalidation of cached ebuild metadata " + \
13579                 "that is distributed with the portage tree. If you must " + \
13580                 "override eclasses from PORTDIR then you are advised to add " + \
13581                 "FEATURES=\"metadata-transfer\" to /etc/make.conf and to run " + \
13582                 "`emerge --regen` after each time that you run `emerge --sync`. " + \
13583                 "Set PORTAGE_ECLASS_WARNING_ENABLE=\"0\" in /etc/make.conf if " + \
13584                 "you would like to disable this warning."
13585                 from textwrap import wrap
13586                 for line in wrap(msg, 72):
13587                         writemsg("%s%s\n" % (prefix, line), noiselevel=-1)
13588
13589         if "moo" in myfiles:
13590                 print """
13591
13592   Larry loves Gentoo (""" + platform.system() + """)
13593
13594  _______________________
13595 < Have you mooed today? >
13596  -----------------------
13597         \   ^__^
13598          \  (oo)\_______
13599             (__)\       )\/\ 
13600                 ||----w |
13601                 ||     ||
13602
13603 """
13604
13605         for x in myfiles:
13606                 ext = os.path.splitext(x)[1]
13607                 if (ext == ".ebuild" or ext == ".tbz2") and os.path.exists(os.path.abspath(x)):
13608                         print colorize("BAD", "\n*** emerging by path is broken and may not always work!!!\n")
13609                         break
13610
13611         root_config = trees[settings["ROOT"]]["root_config"]
13612         if myaction == "list-sets":
13613                 sys.stdout.write("".join("%s\n" % s for s in sorted(root_config.sets)))
13614                 sys.stdout.flush()
13615                 return os.EX_OK
13616
13617         # only expand sets for actions taking package arguments
13618         oldargs = myfiles[:]
13619         if myaction in ("clean", "config", "depclean", "info", "prune", "unmerge", None):
13620                 myfiles, retval = expand_set_arguments(myfiles, myaction, root_config)
13621                 if retval != os.EX_OK:
13622                         return retval
13623
13624                 # Need to handle empty sets specially, otherwise emerge will react 
13625                 # with the help message for empty argument lists
13626                 if oldargs and not myfiles:
13627                         print "emerge: no targets left after set expansion"
13628                         return 0
13629
13630         if ("--tree" in myopts) and ("--columns" in myopts):
13631                 print "emerge: can't specify both of \"--tree\" and \"--columns\"."
13632                 return 1
13633
13634         if ("--quiet" in myopts):
13635                 spinner.update = spinner.update_quiet
13636                 portage.util.noiselimit = -1
13637
13638         # Always create packages if FEATURES=buildpkg
13639         # Imply --buildpkg if --buildpkgonly
13640         if ("buildpkg" in settings.features) or ("--buildpkgonly" in myopts):
13641                 if "--buildpkg" not in myopts:
13642                         myopts["--buildpkg"] = True
13643
13644         # Also allow -S to invoke search action (-sS)
13645         if ("--searchdesc" in myopts):
13646                 if myaction and myaction != "search":
13647                         myfiles.append(myaction)
13648                 if "--search" not in myopts:
13649                         myopts["--search"] = True
13650                 myaction = "search"
13651
13652         # Always try and fetch binary packages if FEATURES=getbinpkg
13653         if ("getbinpkg" in settings.features):
13654                 myopts["--getbinpkg"] = True
13655
13656         if "--buildpkgonly" in myopts:
13657                 # --buildpkgonly will not merge anything, so
13658                 # it cancels all binary package options.
13659                 for opt in ("--getbinpkg", "--getbinpkgonly",
13660                         "--usepkg", "--usepkgonly"):
13661                         myopts.pop(opt, None)
13662
13663         if "--fetch-all-uri" in myopts:
13664                 myopts["--fetchonly"] = True
13665
13666         if "--skipfirst" in myopts and "--resume" not in myopts:
13667                 myopts["--resume"] = True
13668
13669         if ("--getbinpkgonly" in myopts) and not ("--usepkgonly" in myopts):
13670                 myopts["--usepkgonly"] = True
13671
13672         if ("--getbinpkgonly" in myopts) and not ("--getbinpkg" in myopts):
13673                 myopts["--getbinpkg"] = True
13674
13675         if ("--getbinpkg" in myopts) and not ("--usepkg" in myopts):
13676                 myopts["--usepkg"] = True
13677
13678         # Also allow -K to apply --usepkg/-k
13679         if ("--usepkgonly" in myopts) and not ("--usepkg" in myopts):
13680                 myopts["--usepkg"] = True
13681
13682         # Allow -p to remove --ask
13683         if ("--pretend" in myopts) and ("--ask" in myopts):
13684                 print ">>> --pretend disables --ask... removing --ask from options."
13685                 del myopts["--ask"]
13686
13687         # forbid --ask when not in a terminal
13688         # note: this breaks `emerge --ask | tee logfile`, but that doesn't work anyway.
13689         if ("--ask" in myopts) and (not sys.stdin.isatty()):
13690                 portage.writemsg("!!! \"--ask\" should only be used in a terminal. Exiting.\n",
13691                         noiselevel=-1)
13692                 return 1
13693
13694         if settings.get("PORTAGE_DEBUG", "") == "1":
13695                 spinner.update = spinner.update_quiet
13696                 portage.debug=1
13697                 if "python-trace" in settings.features:
13698                         import portage.debug
13699                         portage.debug.set_trace(True)
13700
13701         if not ("--quiet" in myopts):
13702                 if not sys.stdout.isatty() or ("--nospinner" in myopts):
13703                         spinner.update = spinner.update_basic
13704
13705         if "--version" in myopts:
13706                 print getportageversion(settings["PORTDIR"], settings["ROOT"],
13707                         settings.profile_path, settings["CHOST"],
13708                         trees[settings["ROOT"]]["vartree"].dbapi)
13709                 return 0
13710         elif "--help" in myopts:
13711                 _emerge.help.help(myaction, myopts, portage.output.havecolor)
13712                 return 0
13713
13714         if "--debug" in myopts:
13715                 print "myaction", myaction
13716                 print "myopts", myopts
13717
13718         if not myaction and not myfiles and "--resume" not in myopts:
13719                 _emerge.help.help(myaction, myopts, portage.output.havecolor)
13720                 return 1
13721
13722         pretend = "--pretend" in myopts
13723         fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
13724         buildpkgonly = "--buildpkgonly" in myopts
13725
13726         # check if root user is the current user for the actions where emerge needs this
13727         if portage.secpass < 2:
13728                 # We've already allowed "--version" and "--help" above.
13729                 if "--pretend" not in myopts and myaction not in ("search","info"):
13730                         need_superuser = not \
13731                                 (fetchonly or \
13732                                 (buildpkgonly and secpass >= 1) or \
13733                                 myaction in ("metadata", "regen") or \
13734                                 (myaction == "sync" and os.access(settings["PORTDIR"], os.W_OK)))
13735                         if portage.secpass < 1 or \
13736                                 need_superuser:
13737                                 if need_superuser:
13738                                         access_desc = "superuser"
13739                                 else:
13740                                         access_desc = "portage group"
13741                                 # Always show portage_group_warning() when only portage group
13742                                 # access is required but the user is not in the portage group.
13743                                 from portage.data import portage_group_warning
13744                                 if "--ask" in myopts:
13745                                         myopts["--pretend"] = True
13746                                         del myopts["--ask"]
13747                                         print ("%s access is required... " + \
13748                                                 "adding --pretend to options.\n") % access_desc
13749                                         if portage.secpass < 1 and not need_superuser:
13750                                                 portage_group_warning()
13751                                 else:
13752                                         sys.stderr.write(("emerge: %s access is " + \
13753                                                 "required.\n\n") % access_desc)
13754                                         if portage.secpass < 1 and not need_superuser:
13755                                                 portage_group_warning()
13756                                         return 1
13757
13758         disable_emergelog = False
13759         for x in ("--pretend", "--fetchonly", "--fetch-all-uri"):
13760                 if x in myopts:
13761                         disable_emergelog = True
13762                         break
13763         if myaction in ("search", "info"):
13764                 disable_emergelog = True
13765         if disable_emergelog:
13766                 """ Disable emergelog for everything except build or unmerge
13767                 operations.  This helps minimize parallel emerge.log entries that can
13768                 confuse log parsers.  We especially want it disabled during
13769                 parallel-fetch, which uses --resume --fetchonly."""
13770                 global emergelog
13771                 def emergelog(*pargs, **kargs):
13772                         pass
13773
13774         if not "--pretend" in myopts:
13775                 emergelog(xterm_titles, "Started emerge on: "+\
13776                         time.strftime("%b %d, %Y %H:%M:%S", time.localtime()))
13777                 myelogstr=""
13778                 if myopts:
13779                         myelogstr=" ".join(myopts)
13780                 if myaction:
13781                         myelogstr+=" "+myaction
13782                 if myfiles:
13783                         myelogstr += " " + " ".join(oldargs)
13784                 emergelog(xterm_titles, " *** emerge " + myelogstr)
13785         del oldargs
13786
13787         def emergeexitsig(signum, frame):
13788                 signal.signal(signal.SIGINT, signal.SIG_IGN)
13789                 signal.signal(signal.SIGTERM, signal.SIG_IGN)
13790                 portage.util.writemsg("\n\nExiting on signal %(signal)s\n" % {"signal":signum})
13791                 sys.exit(100+signum)
13792         signal.signal(signal.SIGINT, emergeexitsig)
13793         signal.signal(signal.SIGTERM, emergeexitsig)
13794
13795         def emergeexit():
13796                 """This gets out final log message in before we quit."""
13797                 if "--pretend" not in myopts:
13798                         emergelog(xterm_titles, " *** terminating.")
13799                 if "notitles" not in settings.features:
13800                         xtermTitleReset()
13801         portage.atexit_register(emergeexit)
13802
13803         if myaction in ("config", "metadata", "regen", "sync"):
13804                 if "--pretend" in myopts:
13805                         sys.stderr.write(("emerge: The '%s' action does " + \
13806                                 "not support '--pretend'.\n") % myaction)
13807                         return 1
13808
13809         if "sync" == myaction:
13810                 return action_sync(settings, trees, mtimedb, myopts, myaction)
13811         elif "metadata" == myaction:
13812                 action_metadata(settings, portdb, myopts)
13813         elif myaction=="regen":
13814                 validate_ebuild_environment(trees)
13815                 action_regen(settings, portdb, myopts.get("--jobs"),
13816                         myopts.get("--load-average"))
13817         # HELP action
13818         elif "config"==myaction:
13819                 validate_ebuild_environment(trees)
13820                 action_config(settings, trees, myopts, myfiles)
13821
13822         # SEARCH action
13823         elif "search"==myaction:
13824                 validate_ebuild_environment(trees)
13825                 action_search(trees[settings["ROOT"]]["root_config"],
13826                         myopts, myfiles, spinner)
13827         elif myaction in ("clean", "unmerge") or \
13828                 (myaction == "prune" and "--nodeps" in myopts):
13829                 validate_ebuild_environment(trees)
13830
13831                 # Ensure atoms are valid before calling unmerge().
13832                 # For backward compat, leading '=' is not required.
13833                 for x in myfiles:
13834                         if is_valid_package_atom(x) or \
13835                                 is_valid_package_atom("=" + x):
13836                                 continue
13837                         msg = []
13838                         msg.append("'%s' is not a valid package atom." % (x,))
13839                         msg.append("Please check ebuild(5) for full details.")
13840                         writemsg_level("".join("!!! %s\n" % line for line in msg),
13841                                 level=logging.ERROR, noiselevel=-1)
13842                         return 1
13843
13844                 # When given a list of atoms, unmerge
13845                 # them in the order given.
13846                 ordered = myaction == "unmerge"
13847                 if 1 == unmerge(root_config, myopts, myaction, myfiles,
13848                         mtimedb["ldpath"], ordered=ordered):
13849                         if not (buildpkgonly or fetchonly or pretend):
13850                                 post_emerge(root_config, myopts, mtimedb, os.EX_OK)
13851
13852         elif myaction in ("depclean", "info", "prune"):
13853
13854                 # Ensure atoms are valid before calling unmerge().
13855                 vardb = trees[settings["ROOT"]]["vartree"].dbapi
13856                 valid_atoms = []
13857                 for x in myfiles:
13858                         if is_valid_package_atom(x):
13859                                 try:
13860                                         valid_atoms.append(
13861                                                 portage.dep_expand(x, mydb=vardb, settings=settings))
13862                                 except portage.exception.AmbiguousPackageName, e:
13863                                         msg = "The short ebuild name \"" + x + \
13864                                                 "\" is ambiguous.  Please specify " + \
13865                                                 "one of the following " + \
13866                                                 "fully-qualified ebuild names instead:"
13867                                         for line in textwrap.wrap(msg, 70):
13868                                                 writemsg_level("!!! %s\n" % (line,),
13869                                                         level=logging.ERROR, noiselevel=-1)
13870                                         for i in e[0]:
13871                                                 writemsg_level("    %s\n" % colorize("INFORM", i),
13872                                                         level=logging.ERROR, noiselevel=-1)
13873                                         writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
13874                                         return 1
13875                                 continue
13876                         msg = []
13877                         msg.append("'%s' is not a valid package atom." % (x,))
13878                         msg.append("Please check ebuild(5) for full details.")
13879                         writemsg_level("".join("!!! %s\n" % line for line in msg),
13880                                 level=logging.ERROR, noiselevel=-1)
13881                         return 1
13882
13883                 if myaction == "info":
13884                         return action_info(settings, trees, myopts, valid_atoms)
13885
13886                 validate_ebuild_environment(trees)
13887                 action_depclean(settings, trees, mtimedb["ldpath"],
13888                         myopts, myaction, valid_atoms, spinner)
13889                 if not (buildpkgonly or fetchonly or pretend):
13890                         post_emerge(root_config, myopts, mtimedb, os.EX_OK)
13891         # "update", "system", or just process files:
13892         else:
13893                 validate_ebuild_environment(trees)
13894                 if "--pretend" not in myopts:
13895                         display_news_notification(root_config, myopts)
13896                 retval = action_build(settings, trees, mtimedb,
13897                         myopts, myaction, myfiles, spinner)
13898                 root_config = trees[settings["ROOT"]]["root_config"]
13899                 post_emerge(root_config, myopts, mtimedb, retval)
13900
13901                 return retval