Bug #241808 - When loading options for --resume, discard the --color option
[portage.git] / pym / _emerge / __init__.py
1 #!/usr/bin/python -O
2 # Copyright 1999-2006 Gentoo Foundation
3 # Distributed under the terms of the GNU General Public License v2
4 # $Id: emerge 5976 2007-02-17 09:14:53Z genone $
5
6 import array
7 from collections import deque
8 import fcntl
9 import formatter
10 import logging
11 import pwd
12 import select
13 import shlex
14 import shutil
15 import signal
16 import sys
17 import textwrap
18 import urlparse
19 import weakref
20 import gc
21 import os, stat
22 import platform
23
24 try:
25         import portage
26 except ImportError:
27         from os import path as osp
28         sys.path.insert(0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "pym"))
29         import portage
30
31 from portage import digraph
32 from portage.const import NEWS_LIB_PATH
33
34 import _emerge.help
35 import portage.xpak, commands, errno, re, socket, time, types
36 from portage.output import blue, bold, colorize, darkblue, darkgreen, darkred, green, \
37         nc_len, red, teal, turquoise, xtermTitle, \
38         xtermTitleReset, yellow
39 from portage.output import create_color_func
40 good = create_color_func("GOOD")
41 bad = create_color_func("BAD")
42 # white looks bad on terminals with white background
43 from portage.output import bold as white
44
45 import portage.elog
46 import portage.dep
47 portage.dep._dep_check_strict = True
48 import portage.util
49 import portage.locks
50 import portage.exception
51 from portage.data import secpass
52 from portage.elog.messages import eerror
53 from portage.util import normalize_path as normpath
54 from portage.util import writemsg, writemsg_level
55 from portage.sets import load_default_config, SETPREFIX
56 from portage.sets.base import InternalPackageSet
57
58 from itertools import chain, izip
59 from UserDict import DictMixin
60
61 try:
62         import cPickle as pickle
63 except ImportError:
64         import pickle
65
66 try:
67         import cStringIO as StringIO
68 except ImportError:
69         import StringIO
70
71 class stdout_spinner(object):
72         scroll_msgs = [
73                 "Gentoo Rocks ("+platform.system()+")",
74                 "Thank you for using Gentoo. :)",
75                 "Are you actually trying to read this?",
76                 "How many times have you stared at this?",
77                 "We are generating the cache right now",
78                 "You are paying too much attention.",
79                 "A theory is better than its explanation.",
80                 "Phasers locked on target, Captain.",
81                 "Thrashing is just virtual crashing.",
82                 "To be is to program.",
83                 "Real Users hate Real Programmers.",
84                 "When all else fails, read the instructions.",
85                 "Functionality breeds Contempt.",
86                 "The future lies ahead.",
87                 "3.1415926535897932384626433832795028841971694",
88                 "Sometimes insanity is the only alternative.",
89                 "Inaccuracy saves a world of explanation.",
90         ]
91
92         twirl_sequence = "/-\\|/-\\|/-\\|/-\\|\\-/|\\-/|\\-/|\\-/|"
93
94         def __init__(self):
95                 self.spinpos = 0
96                 self.update = self.update_twirl
97                 self.scroll_sequence = self.scroll_msgs[
98                         int(time.time() * 100) % len(self.scroll_msgs)]
99                 self.last_update = 0
100                 self.min_display_latency = 0.05
101
102         def _return_early(self):
103                 """
104                 Flushing ouput to the tty too frequently wastes cpu time. Therefore,
105                 each update* method should return without doing any output when this
106                 method returns True.
107                 """
108                 cur_time = time.time()
109                 if cur_time - self.last_update < self.min_display_latency:
110                         return True
111                 self.last_update = cur_time
112                 return False
113
114         def update_basic(self):
115                 self.spinpos = (self.spinpos + 1) % 500
116                 if self._return_early():
117                         return
118                 if (self.spinpos % 100) == 0:
119                         if self.spinpos == 0:
120                                 sys.stdout.write(". ")
121                         else:
122                                 sys.stdout.write(".")
123                 sys.stdout.flush()
124
125         def update_scroll(self):
126                 if self._return_early():
127                         return
128                 if(self.spinpos >= len(self.scroll_sequence)):
129                         sys.stdout.write(darkgreen(" \b\b\b" + self.scroll_sequence[
130                                 len(self.scroll_sequence) - 1 - (self.spinpos % len(self.scroll_sequence))]))
131                 else:
132                         sys.stdout.write(green("\b " + self.scroll_sequence[self.spinpos]))
133                 sys.stdout.flush()
134                 self.spinpos = (self.spinpos + 1) % (2 * len(self.scroll_sequence))
135
136         def update_twirl(self):
137                 self.spinpos = (self.spinpos + 1) % len(self.twirl_sequence)
138                 if self._return_early():
139                         return
140                 sys.stdout.write("\b\b " + self.twirl_sequence[self.spinpos])
141                 sys.stdout.flush()
142
143         def update_quiet(self):
144                 return
145
146 def userquery(prompt, responses=None, colours=None):
147         """Displays a prompt and a set of responses, then waits for a response
148         which is checked against the responses and the first to match is
149         returned.  An empty response will match the first value in responses.  The
150         input buffer is *not* cleared prior to the prompt!
151
152         prompt: a String.
153         responses: a List of Strings.
154         colours: a List of Functions taking and returning a String, used to
155         process the responses for display. Typically these will be functions
156         like red() but could be e.g. lambda x: "DisplayString".
157         If responses is omitted, defaults to ["Yes", "No"], [green, red].
158         If only colours is omitted, defaults to [bold, ...].
159
160         Returns a member of the List responses. (If called without optional
161         arguments, returns "Yes" or "No".)
162         KeyboardInterrupt is converted to SystemExit to avoid tracebacks being
163         printed."""
164         if responses is None:
165                 responses = ["Yes", "No"]
166                 colours = [
167                         create_color_func("PROMPT_CHOICE_DEFAULT"),
168                         create_color_func("PROMPT_CHOICE_OTHER")
169                 ]
170         elif colours is None:
171                 colours=[bold]
172         colours=(colours*len(responses))[:len(responses)]
173         print bold(prompt),
174         try:
175                 while True:
176                         response=raw_input("["+"/".join([colours[i](responses[i]) for i in range(len(responses))])+"] ")
177                         for key in responses:
178                                 # An empty response will match the first value in responses.
179                                 if response.upper()==key[:len(response)].upper():
180                                         return key
181                         print "Sorry, response '%s' not understood." % response,
182         except (EOFError, KeyboardInterrupt):
183                 print "Interrupted."
184                 sys.exit(1)
185
186 actions = frozenset([
187 "clean", "config", "depclean",
188 "info", "list-sets", "metadata",
189 "prune", "regen",  "search",
190 "sync",  "unmerge",
191 ])
192 options=[
193 "--ask",          "--alphabetical",
194 "--buildpkg",     "--buildpkgonly",
195 "--changelog",    "--columns",
196 "--complete-graph",
197 "--debug",        "--deep",
198 "--digest",
199 "--emptytree",
200 "--fetchonly",    "--fetch-all-uri",
201 "--getbinpkg",    "--getbinpkgonly",
202 "--help",         "--ignore-default-opts",
203 "--keep-going",
204 "--noconfmem",
205 "--newuse",       "--nocolor",
206 "--nodeps",       "--noreplace",
207 "--nospinner",    "--oneshot",
208 "--onlydeps",     "--pretend",
209 "--quiet",        "--resume",
210 "--searchdesc",   "--selective",
211 "--skipfirst",
212 "--tree",
213 "--update",
214 "--usepkg",       "--usepkgonly",
215 "--verbose",      "--version"
216 ]
217
218 shortmapping={
219 "1":"--oneshot",
220 "a":"--ask",
221 "b":"--buildpkg",  "B":"--buildpkgonly",
222 "c":"--clean",     "C":"--unmerge",
223 "d":"--debug",     "D":"--deep",
224 "e":"--emptytree",
225 "f":"--fetchonly", "F":"--fetch-all-uri",
226 "g":"--getbinpkg", "G":"--getbinpkgonly",
227 "h":"--help",
228 "k":"--usepkg",    "K":"--usepkgonly",
229 "l":"--changelog",
230 "n":"--noreplace", "N":"--newuse",
231 "o":"--onlydeps",  "O":"--nodeps",
232 "p":"--pretend",   "P":"--prune",
233 "q":"--quiet",
234 "s":"--search",    "S":"--searchdesc",
235 "t":"--tree",
236 "u":"--update",
237 "v":"--verbose",   "V":"--version"
238 }
239
240 def emergelog(xterm_titles, mystr, short_msg=None):
241         if xterm_titles and short_msg:
242                 if "HOSTNAME" in os.environ:
243                         short_msg = os.environ["HOSTNAME"]+": "+short_msg
244                 xtermTitle(short_msg)
245         try:
246                 file_path = "/var/log/emerge.log"
247                 mylogfile = open(file_path, "a")
248                 portage.util.apply_secpass_permissions(file_path,
249                         uid=portage.portage_uid, gid=portage.portage_gid,
250                         mode=0660)
251                 mylock = None
252                 try:
253                         mylock = portage.locks.lockfile(mylogfile)
254                         # seek because we may have gotten held up by the lock.
255                         # if so, we may not be positioned at the end of the file.
256                         mylogfile.seek(0, 2)
257                         mylogfile.write(str(time.time())[:10]+": "+mystr+"\n")
258                         mylogfile.flush()
259                 finally:
260                         if mylock:
261                                 portage.locks.unlockfile(mylock)
262                         mylogfile.close()
263         except (IOError,OSError,portage.exception.PortageException), e:
264                 if secpass >= 1:
265                         print >> sys.stderr, "emergelog():",e
266
267 def countdown(secs=5, doing="Starting"):
268         if secs:
269                 print ">>> Waiting",secs,"seconds before starting..."
270                 print ">>> (Control-C to abort)...\n"+doing+" in: ",
271                 ticks=range(secs)
272                 ticks.reverse()
273                 for sec in ticks:
274                         sys.stdout.write(colorize("UNMERGE_WARN", str(sec+1)+" "))
275                         sys.stdout.flush()
276                         time.sleep(1)
277                 print
278
279 # formats a size given in bytes nicely
280 def format_size(mysize):
281         if type(mysize) not in [types.IntType,types.LongType]:
282                 return str(mysize)
283         if 0 != mysize % 1024:
284                 # Always round up to the next kB so that it doesn't show 0 kB when
285                 # some small file still needs to be fetched.
286                 mysize += 1024 - mysize % 1024
287         mystr=str(mysize/1024)
288         mycount=len(mystr)
289         while (mycount > 3):
290                 mycount-=3
291                 mystr=mystr[:mycount]+","+mystr[mycount:]
292         return mystr+" kB"
293
294
295 def getgccversion(chost):
296         """
297         rtype: C{str}
298         return:  the current in-use gcc version
299         """
300
301         gcc_ver_command = 'gcc -dumpversion'
302         gcc_ver_prefix = 'gcc-'
303
304         gcc_not_found_error = red(
305         "!!! No gcc found. You probably need to 'source /etc/profile'\n" +
306         "!!! to update the environment of this terminal and possibly\n" +
307         "!!! other terminals also.\n"
308         )
309
310         mystatus, myoutput = commands.getstatusoutput("gcc-config -c")
311         if mystatus == os.EX_OK and myoutput.startswith(chost + "-"):
312                 return myoutput.replace(chost + "-", gcc_ver_prefix, 1)
313
314         mystatus, myoutput = commands.getstatusoutput(
315                 chost + "-" + gcc_ver_command)
316         if mystatus == os.EX_OK:
317                 return gcc_ver_prefix + myoutput
318
319         mystatus, myoutput = commands.getstatusoutput(gcc_ver_command)
320         if mystatus == os.EX_OK:
321                 return gcc_ver_prefix + myoutput
322
323         portage.writemsg(gcc_not_found_error, noiselevel=-1)
324         return "[unavailable]"
325
326 def getportageversion(portdir, target_root, profile, chost, vardb):
327         profilever = "unavailable"
328         if profile:
329                 realpath = os.path.realpath(profile)
330                 basepath   = os.path.realpath(os.path.join(portdir, "profiles"))
331                 if realpath.startswith(basepath):
332                         profilever = realpath[1 + len(basepath):]
333                 else:
334                         try:
335                                 profilever = "!" + os.readlink(profile)
336                         except (OSError):
337                                 pass
338                 del realpath, basepath
339
340         libcver=[]
341         libclist  = vardb.match("virtual/libc")
342         libclist += vardb.match("virtual/glibc")
343         libclist  = portage.util.unique_array(libclist)
344         for x in libclist:
345                 xs=portage.catpkgsplit(x)
346                 if libcver:
347                         libcver+=","+"-".join(xs[1:])
348                 else:
349                         libcver="-".join(xs[1:])
350         if libcver==[]:
351                 libcver="unavailable"
352
353         gccver = getgccversion(chost)
354         unameout=platform.release()+" "+platform.machine()
355
356         return "Portage " + portage.VERSION +" ("+profilever+", "+gccver+", "+libcver+", "+unameout+")"
357
358 def create_depgraph_params(myopts, myaction):
359         #configure emerge engine parameters
360         #
361         # self:      include _this_ package regardless of if it is merged.
362         # selective: exclude the package if it is merged
363         # recurse:   go into the dependencies
364         # deep:      go into the dependencies of already merged packages
365         # empty:     pretend nothing is merged
366         # complete:  completely account for all known dependencies
367         # remove:    build graph for use in removing packages
368         myparams = set(["recurse"])
369
370         if myaction == "remove":
371                 myparams.add("remove")
372                 myparams.add("complete")
373                 return myparams
374
375         if "--update" in myopts or \
376                 "--newuse" in myopts or \
377                 "--reinstall" in myopts or \
378                 "--noreplace" in myopts:
379                 myparams.add("selective")
380         if "--emptytree" in myopts:
381                 myparams.add("empty")
382                 myparams.discard("selective")
383         if "--nodeps" in myopts:
384                 myparams.discard("recurse")
385         if "--deep" in myopts:
386                 myparams.add("deep")
387         if "--complete-graph" in myopts:
388                 myparams.add("complete")
389         return myparams
390
391 # search functionality
392 class search(object):
393
394         #
395         # class constants
396         #
397         VERSION_SHORT=1
398         VERSION_RELEASE=2
399
400         #
401         # public interface
402         #
403         def __init__(self, root_config, spinner, searchdesc,
404                 verbose, usepkg, usepkgonly):
405                 """Searches the available and installed packages for the supplied search key.
406                 The list of available and installed packages is created at object instantiation.
407                 This makes successive searches faster."""
408                 self.settings = root_config.settings
409                 self.vartree = root_config.trees["vartree"]
410                 self.spinner = spinner
411                 self.verbose = verbose
412                 self.searchdesc = searchdesc
413                 self.root_config = root_config
414                 self.setconfig = root_config.setconfig
415                 self.matches = {"pkg" : []}
416                 self.mlen = 0
417
418                 def fake_portdb():
419                         pass
420                 self.portdb = fake_portdb
421                 for attrib in ("aux_get", "cp_all",
422                         "xmatch", "findname", "getFetchMap"):
423                         setattr(fake_portdb, attrib, getattr(self, "_"+attrib))
424
425                 self._dbs = []
426
427                 portdb = root_config.trees["porttree"].dbapi
428                 bindb = root_config.trees["bintree"].dbapi
429                 vardb = root_config.trees["vartree"].dbapi
430
431                 if not usepkgonly and portdb._have_root_eclass_dir:
432                         self._dbs.append(portdb)
433
434                 if (usepkg or usepkgonly) and bindb.cp_all():
435                         self._dbs.append(bindb)
436
437                 self._dbs.append(vardb)
438                 self._portdb = portdb
439
440         def _cp_all(self):
441                 cp_all = set()
442                 for db in self._dbs:
443                         cp_all.update(db.cp_all())
444                 return list(sorted(cp_all))
445
446         def _aux_get(self, *args, **kwargs):
447                 for db in self._dbs:
448                         try:
449                                 return db.aux_get(*args, **kwargs)
450                         except KeyError:
451                                 pass
452                 raise
453
454         def _findname(self, *args, **kwargs):
455                 for db in self._dbs:
456                         if db is not self._portdb:
457                                 # We don't want findname to return anything
458                                 # unless it's an ebuild in a portage tree.
459                                 # Otherwise, it's already built and we don't
460                                 # care about it.
461                                 continue
462                         func = getattr(db, "findname", None)
463                         if func:
464                                 value = func(*args, **kwargs)
465                                 if value:
466                                         return value
467                 return None
468
469         def _getFetchMap(self, *args, **kwargs):
470                 for db in self._dbs:
471                         func = getattr(db, "getFetchMap", None)
472                         if func:
473                                 value = func(*args, **kwargs)
474                                 if value:
475                                         return value
476                 return {}
477
478         def _visible(self, db, cpv, metadata):
479                 installed = db is self.vartree.dbapi
480                 built = installed or db is not self._portdb
481                 pkg_type = "ebuild"
482                 if installed:
483                         pkg_type = "installed"
484                 elif built:
485                         pkg_type = "binary"
486                 return visible(self.settings,
487                         Package(type_name=pkg_type, root_config=self.root_config,
488                         cpv=cpv, built=built, installed=installed, metadata=metadata))
489
490         def _xmatch(self, level, atom):
491                 """
492                 This method does not expand old-style virtuals because it
493                 is restricted to returning matches for a single ${CATEGORY}/${PN}
494                 and old-style virual matches unreliable for that when querying
495                 multiple package databases. If necessary, old-style virtuals
496                 can be performed on atoms prior to calling this method.
497                 """
498                 cp = portage.dep_getkey(atom)
499                 if level == "match-all":
500                         matches = set()
501                         for db in self._dbs:
502                                 if hasattr(db, "xmatch"):
503                                         matches.update(db.xmatch(level, atom))
504                                 else:
505                                         matches.update(db.match(atom))
506                         result = list(x for x in matches if portage.cpv_getkey(x) == cp)
507                         db._cpv_sort_ascending(result)
508                 elif level == "match-visible":
509                         matches = set()
510                         for db in self._dbs:
511                                 if hasattr(db, "xmatch"):
512                                         matches.update(db.xmatch(level, atom))
513                                 else:
514                                         db_keys = list(db._aux_cache_keys)
515                                         for cpv in db.match(atom):
516                                                 metadata = izip(db_keys,
517                                                         db.aux_get(cpv, db_keys))
518                                                 if not self._visible(db, cpv, metadata):
519                                                         continue
520                                                 matches.add(cpv)
521                         result = list(x for x in matches if portage.cpv_getkey(x) == cp)
522                         db._cpv_sort_ascending(result)
523                 elif level == "bestmatch-visible":
524                         result = None
525                         for db in self._dbs:
526                                 if hasattr(db, "xmatch"):
527                                         cpv = db.xmatch("bestmatch-visible", atom)
528                                         if not cpv or portage.cpv_getkey(cpv) != cp:
529                                                 continue
530                                         if not result or cpv == portage.best([cpv, result]):
531                                                 result = cpv
532                                 else:
533                                         db_keys = Package.metadata_keys
534                                         # break out of this loop with highest visible
535                                         # match, checked in descending order
536                                         for cpv in reversed(db.match(atom)):
537                                                 if portage.cpv_getkey(cpv) != cp:
538                                                         continue
539                                                 metadata = izip(db_keys,
540                                                         db.aux_get(cpv, db_keys))
541                                                 if not self._visible(db, cpv, metadata):
542                                                         continue
543                                                 if not result or cpv == portage.best([cpv, result]):
544                                                         result = cpv
545                                                 break
546                 else:
547                         raise NotImplementedError(level)
548                 return result
549
550         def execute(self,searchkey):
551                 """Performs the search for the supplied search key"""
552                 match_category = 0
553                 self.searchkey=searchkey
554                 self.packagematches = []
555                 if self.searchdesc:
556                         self.searchdesc=1
557                         self.matches = {"pkg":[], "desc":[], "set":[]}
558                 else:
559                         self.searchdesc=0
560                         self.matches = {"pkg":[], "set":[]}
561                 print "Searching...   ",
562
563                 regexsearch = False
564                 if self.searchkey.startswith('%'):
565                         regexsearch = True
566                         self.searchkey = self.searchkey[1:]
567                 if self.searchkey.startswith('@'):
568                         match_category = 1
569                         self.searchkey = self.searchkey[1:]
570                 if regexsearch:
571                         self.searchre=re.compile(self.searchkey,re.I)
572                 else:
573                         self.searchre=re.compile(re.escape(self.searchkey), re.I)
574                 for package in self.portdb.cp_all():
575                         self.spinner.update()
576
577                         if match_category:
578                                 match_string  = package[:]
579                         else:
580                                 match_string  = package.split("/")[-1]
581
582                         masked=0
583                         if self.searchre.search(match_string):
584                                 if not self.portdb.xmatch("match-visible", package):
585                                         masked=1
586                                 self.matches["pkg"].append([package,masked])
587                         elif self.searchdesc: # DESCRIPTION searching
588                                 full_package = self.portdb.xmatch("bestmatch-visible", package)
589                                 if not full_package:
590                                         #no match found; we don't want to query description
591                                         full_package = portage.best(
592                                                 self.portdb.xmatch("match-all", package))
593                                         if not full_package:
594                                                 continue
595                                         else:
596                                                 masked=1
597                                 try:
598                                         full_desc = self.portdb.aux_get(
599                                                 full_package, ["DESCRIPTION"])[0]
600                                 except KeyError:
601                                         print "emerge: search: aux_get() failed, skipping"
602                                         continue
603                                 if self.searchre.search(full_desc):
604                                         self.matches["desc"].append([full_package,masked])
605
606                 self.sdict = self.setconfig.getSets()
607                 for setname in self.sdict:
608                         self.spinner.update()
609                         if match_category:
610                                 match_string = setname
611                         else:
612                                 match_string = setname.split("/")[-1]
613                         
614                         if self.searchre.search(match_string):
615                                 self.matches["set"].append([setname, False])
616                         elif self.searchdesc:
617                                 if self.searchre.search(
618                                         self.sdict[setname].getMetadata("DESCRIPTION")):
619                                         self.matches["set"].append([setname, False])
620                         
621                 self.mlen=0
622                 for mtype in self.matches:
623                         self.matches[mtype].sort()
624                         self.mlen += len(self.matches[mtype])
625
626         def addCP(self, cp):
627                 if not self.portdb.xmatch("match-all", cp):
628                         return
629                 masked = 0
630                 if not self.portdb.xmatch("bestmatch-visible", cp):
631                         masked = 1
632                 self.matches["pkg"].append([cp, masked])
633                 self.mlen += 1
634
635         def output(self):
636                 """Outputs the results of the search."""
637                 print "\b\b  \n[ Results for search key : "+white(self.searchkey)+" ]"
638                 print "[ Applications found : "+white(str(self.mlen))+" ]"
639                 print " "
640                 vardb = self.vartree.dbapi
641                 for mtype in self.matches:
642                         for match,masked in self.matches[mtype]:
643                                 full_package = None
644                                 if mtype == "pkg":
645                                         catpack = match
646                                         full_package = self.portdb.xmatch(
647                                                 "bestmatch-visible", match)
648                                         if not full_package:
649                                                 #no match found; we don't want to query description
650                                                 masked=1
651                                                 full_package = portage.best(
652                                                         self.portdb.xmatch("match-all",match))
653                                 elif mtype == "desc":
654                                         full_package = match
655                                         match        = portage.cpv_getkey(match)
656                                 elif mtype == "set":
657                                         print green("*")+"  "+white(match)
658                                         print "     ", darkgreen("Description:")+"  ", self.sdict[match].getMetadata("DESCRIPTION")
659                                         print
660                                 if full_package:
661                                         try:
662                                                 desc, homepage, license = self.portdb.aux_get(
663                                                         full_package, ["DESCRIPTION","HOMEPAGE","LICENSE"])
664                                         except KeyError:
665                                                 print "emerge: search: aux_get() failed, skipping"
666                                                 continue
667                                         if masked:
668                                                 print green("*")+"  "+white(match)+" "+red("[ Masked ]")
669                                         else:
670                                                 print green("*")+"  "+white(match)
671                                         myversion = self.getVersion(full_package, search.VERSION_RELEASE)
672
673                                         mysum = [0,0]
674                                         file_size_str = None
675                                         mycat = match.split("/")[0]
676                                         mypkg = match.split("/")[1]
677                                         mycpv = match + "-" + myversion
678                                         myebuild = self.portdb.findname(mycpv)
679                                         if myebuild:
680                                                 pkgdir = os.path.dirname(myebuild)
681                                                 from portage import manifest
682                                                 mf = manifest.Manifest(
683                                                         pkgdir, self.settings["DISTDIR"])
684                                                 try:
685                                                         uri_map = self.portdb.getFetchMap(mycpv)
686                                                 except portage.exception.InvalidDependString, e:
687                                                         file_size_str = "Unknown (%s)" % (e,)
688                                                         del e
689                                                 else:
690                                                         try:
691                                                                 mysum[0] = mf.getDistfilesSize(uri_map)
692                                                         except KeyError, e:
693                                                                 file_size_str = "Unknown (missing " + \
694                                                                         "digest for %s)" % (e,)
695                                                                 del e
696
697                                         available = False
698                                         for db in self._dbs:
699                                                 if db is not vardb and \
700                                                         db.cpv_exists(mycpv):
701                                                         available = True
702                                                         if not myebuild and hasattr(db, "bintree"):
703                                                                 myebuild = db.bintree.getname(mycpv)
704                                                                 try:
705                                                                         mysum[0] = os.stat(myebuild).st_size
706                                                                 except OSError:
707                                                                         myebuild = None
708                                                         break
709
710                                         if myebuild and file_size_str is None:
711                                                 mystr = str(mysum[0] / 1024)
712                                                 mycount = len(mystr)
713                                                 while (mycount > 3):
714                                                         mycount -= 3
715                                                         mystr = mystr[:mycount] + "," + mystr[mycount:]
716                                                 file_size_str = mystr + " kB"
717
718                                         if self.verbose:
719                                                 if available:
720                                                         print "     ", darkgreen("Latest version available:"),myversion
721                                                 print "     ", self.getInstallationStatus(mycat+'/'+mypkg)
722                                                 if myebuild:
723                                                         print "      %s %s" % \
724                                                                 (darkgreen("Size of files:"), file_size_str)
725                                                 print "     ", darkgreen("Homepage:")+"     ",homepage
726                                                 print "     ", darkgreen("Description:")+"  ",desc
727                                                 print "     ", darkgreen("License:")+"      ",license
728                                                 print
729         #
730         # private interface
731         #
732         def getInstallationStatus(self,package):
733                 installed_package = self.vartree.dep_bestmatch(package)
734                 result = ""
735                 version = self.getVersion(installed_package,search.VERSION_RELEASE)
736                 if len(version) > 0:
737                         result = darkgreen("Latest version installed:")+" "+version
738                 else:
739                         result = darkgreen("Latest version installed:")+" [ Not Installed ]"
740                 return result
741
742         def getVersion(self,full_package,detail):
743                 if len(full_package) > 1:
744                         package_parts = portage.catpkgsplit(full_package)
745                         if detail == search.VERSION_RELEASE and package_parts[3] != 'r0':
746                                 result = package_parts[2]+ "-" + package_parts[3]
747                         else:
748                                 result = package_parts[2]
749                 else:
750                         result = ""
751                 return result
752
753 class RootConfig(object):
754         """This is used internally by depgraph to track information about a
755         particular $ROOT."""
756
757         pkg_tree_map = {
758                 "ebuild"    : "porttree",
759                 "binary"    : "bintree",
760                 "installed" : "vartree"
761         }
762
763         tree_pkg_map = {}
764         for k, v in pkg_tree_map.iteritems():
765                 tree_pkg_map[v] = k
766
767         def __init__(self, settings, trees, setconfig):
768                 self.trees = trees
769                 self.settings = settings
770                 self.iuse_implicit = tuple(sorted(settings._get_implicit_iuse()))
771                 self.root = self.settings["ROOT"]
772                 self.setconfig = setconfig
773                 self.sets = self.setconfig.getSets()
774                 self.visible_pkgs = PackageVirtualDbapi(self.settings)
775
776 def create_world_atom(pkg, args_set, root_config):
777         """Create a new atom for the world file if one does not exist.  If the
778         argument atom is precise enough to identify a specific slot then a slot
779         atom will be returned. Atoms that are in the system set may also be stored
780         in world since system atoms can only match one slot while world atoms can
781         be greedy with respect to slots.  Unslotted system packages will not be
782         stored in world."""
783
784         arg_atom = args_set.findAtomForPackage(pkg)
785         if not arg_atom:
786                 return None
787         cp = portage.dep_getkey(arg_atom)
788         new_world_atom = cp
789         sets = root_config.sets
790         portdb = root_config.trees["porttree"].dbapi
791         vardb = root_config.trees["vartree"].dbapi
792         available_slots = set(portdb.aux_get(cpv, ["SLOT"])[0] \
793                 for cpv in portdb.match(cp))
794         slotted = len(available_slots) > 1 or \
795                 (len(available_slots) == 1 and "0" not in available_slots)
796         if not slotted:
797                 # check the vdb in case this is multislot
798                 available_slots = set(vardb.aux_get(cpv, ["SLOT"])[0] \
799                         for cpv in vardb.match(cp))
800                 slotted = len(available_slots) > 1 or \
801                         (len(available_slots) == 1 and "0" not in available_slots)
802         if slotted and arg_atom != cp:
803                 # If the user gave a specific atom, store it as a
804                 # slot atom in the world file.
805                 slot_atom = pkg.slot_atom
806
807                 # For USE=multislot, there are a couple of cases to
808                 # handle here:
809                 #
810                 # 1) SLOT="0", but the real SLOT spontaneously changed to some
811                 #    unknown value, so just record an unslotted atom.
812                 #
813                 # 2) SLOT comes from an installed package and there is no
814                 #    matching SLOT in the portage tree.
815                 #
816                 # Make sure that the slot atom is available in either the
817                 # portdb or the vardb, since otherwise the user certainly
818                 # doesn't want the SLOT atom recorded in the world file
819                 # (case 1 above).  If it's only available in the vardb,
820                 # the user may be trying to prevent a USE=multislot
821                 # package from being removed by --depclean (case 2 above).
822
823                 mydb = portdb
824                 if not portdb.match(slot_atom):
825                         # SLOT seems to come from an installed multislot package
826                         mydb = vardb
827                 # If there is no installed package matching the SLOT atom,
828                 # it probably changed SLOT spontaneously due to USE=multislot,
829                 # so just record an unslotted atom.
830                 if vardb.match(slot_atom):
831                         # Now verify that the argument is precise
832                         # enough to identify a specific slot.
833                         matches = mydb.match(arg_atom)
834                         matched_slots = set()
835                         for cpv in matches:
836                                 matched_slots.add(mydb.aux_get(cpv, ["SLOT"])[0])
837                         if len(matched_slots) == 1:
838                                 new_world_atom = slot_atom
839
840         if new_world_atom == sets["world"].findAtomForPackage(pkg):
841                 # Both atoms would be identical, so there's nothing to add.
842                 return None
843         if not slotted:
844                 # Unlike world atoms, system atoms are not greedy for slots, so they
845                 # can't be safely excluded from world if they are slotted.
846                 system_atom = sets["system"].findAtomForPackage(pkg)
847                 if system_atom:
848                         if not portage.dep_getkey(system_atom).startswith("virtual/"):
849                                 return None
850                         # System virtuals aren't safe to exclude from world since they can
851                         # match multiple old-style virtuals but only one of them will be
852                         # pulled in by update or depclean.
853                         providers = portdb.mysettings.getvirtuals().get(
854                                 portage.dep_getkey(system_atom))
855                         if providers and len(providers) == 1 and providers[0] == cp:
856                                 return None
857         return new_world_atom
858
859 def filter_iuse_defaults(iuse):
860         for flag in iuse:
861                 if flag.startswith("+") or flag.startswith("-"):
862                         yield flag[1:]
863                 else:
864                         yield flag
865
866 class SlotObject(object):
867         __slots__ = ("__weakref__",)
868
869         def __init__(self, **kwargs):
870                 classes = [self.__class__]
871                 while classes:
872                         c = classes.pop()
873                         if c is SlotObject:
874                                 continue
875                         classes.extend(c.__bases__)
876                         slots = getattr(c, "__slots__", None)
877                         if not slots:
878                                 continue
879                         for myattr in slots:
880                                 myvalue = kwargs.get(myattr, None)
881                                 setattr(self, myattr, myvalue)
882
883         def copy(self):
884                 """
885                 Create a new instance and copy all attributes
886                 defined from __slots__ (including those from
887                 inherited classes).
888                 """
889                 obj = self.__class__()
890
891                 classes = [self.__class__]
892                 while classes:
893                         c = classes.pop()
894                         if c is SlotObject:
895                                 continue
896                         classes.extend(c.__bases__)
897                         slots = getattr(c, "__slots__", None)
898                         if not slots:
899                                 continue
900                         for myattr in slots:
901                                 setattr(obj, myattr, getattr(self, myattr))
902
903                 return obj
904
905 class AbstractDepPriority(SlotObject):
906         __slots__ = ("buildtime", "runtime", "runtime_post")
907
908         def __lt__(self, other):
909                 return self.__int__() < other
910
911         def __le__(self, other):
912                 return self.__int__() <= other
913
914         def __eq__(self, other):
915                 return self.__int__() == other
916
917         def __ne__(self, other):
918                 return self.__int__() != other
919
920         def __gt__(self, other):
921                 return self.__int__() > other
922
923         def __ge__(self, other):
924                 return self.__int__() >= other
925
926         def copy(self):
927                 import copy
928                 return copy.copy(self)
929
930 class DepPriority(AbstractDepPriority):
931         """
932                 This class generates an integer priority level based of various
933                 attributes of the dependency relationship.  Attributes can be assigned
934                 at any time and the new integer value will be generated on calls to the
935                 __int__() method.  Rich comparison operators are supported.
936
937                 The boolean attributes that affect the integer value are "satisfied",
938                 "buildtime", "runtime", and "system".  Various combinations of
939                 attributes lead to the following priority levels:
940
941                 Combination of properties           Priority  Category
942
943                 not satisfied and buildtime            0       HARD
944                 not satisfied and runtime             -1       MEDIUM
945                 not satisfied and runtime_post        -2       MEDIUM_SOFT
946                 satisfied and buildtime and rebuild   -3       SOFT
947                 satisfied and buildtime               -4       SOFT
948                 satisfied and runtime                 -5       SOFT
949                 satisfied and runtime_post            -6       SOFT
950                 (none of the above)                   -6       SOFT
951
952                 Several integer constants are defined for categorization of priority
953                 levels:
954
955                 MEDIUM   The upper boundary for medium dependencies.
956                 MEDIUM_SOFT   The upper boundary for medium-soft dependencies.
957                 SOFT     The upper boundary for soft dependencies.
958                 MIN      The lower boundary for soft dependencies.
959         """
960         __slots__ = ("satisfied", "rebuild")
961         MEDIUM = -1
962         MEDIUM_SOFT = -2
963         SOFT   = -3
964         MIN    = -6
965
966         def __int__(self):
967                 if not self.satisfied:
968                         if self.buildtime:
969                                 return 0
970                         if self.runtime:
971                                 return -1
972                         if self.runtime_post:
973                                 return -2
974                 if self.buildtime:
975                         if self.rebuild:
976                                 return -3
977                         return -4
978                 if self.runtime:
979                         return -5
980                 if self.runtime_post:
981                         return -6
982                 return -6
983
984         def __str__(self):
985                 myvalue = self.__int__()
986                 if myvalue > self.MEDIUM:
987                         return "hard"
988                 if myvalue > self.MEDIUM_SOFT:
989                         return "medium"
990                 if myvalue > self.SOFT:
991                         return "medium-soft"
992                 return "soft"
993
994 class BlockerDepPriority(DepPriority):
995         __slots__ = ()
996         def __int__(self):
997                 return 0
998
999 BlockerDepPriority.instance = BlockerDepPriority()
1000
1001 class UnmergeDepPriority(AbstractDepPriority):
1002         __slots__ = ("satisfied",)
1003         """
1004         Combination of properties           Priority  Category
1005
1006         runtime                                0       HARD
1007         runtime_post                          -1       HARD
1008         buildtime                             -2       SOFT
1009         (none of the above)                   -2       SOFT
1010         """
1011
1012         MAX    =  0
1013         SOFT   = -2
1014         MIN    = -2
1015
1016         def __int__(self):
1017                 if self.runtime:
1018                         return 0
1019                 if self.runtime_post:
1020                         return -1
1021                 if self.buildtime:
1022                         return -2
1023                 return -2
1024
1025         def __str__(self):
1026                 myvalue = self.__int__()
1027                 if myvalue > self.SOFT:
1028                         return "hard"
1029                 return "soft"
1030
1031 class FakeVartree(portage.vartree):
1032         """This is implements an in-memory copy of a vartree instance that provides
1033         all the interfaces required for use by the depgraph.  The vardb is locked
1034         during the constructor call just long enough to read a copy of the
1035         installed package information.  This allows the depgraph to do it's
1036         dependency calculations without holding a lock on the vardb.  It also
1037         allows things like vardb global updates to be done in memory so that the
1038         user doesn't necessarily need write access to the vardb in cases where
1039         global updates are necessary (updates are performed when necessary if there
1040         is not a matching ebuild in the tree)."""
1041         def __init__(self, root_config, pkg_cache=None, acquire_lock=1):
1042                 self._root_config = root_config
1043                 if pkg_cache is None:
1044                         pkg_cache = {}
1045                 real_vartree = root_config.trees["vartree"]
1046                 portdb = root_config.trees["porttree"].dbapi
1047                 self.root = real_vartree.root
1048                 self.settings = real_vartree.settings
1049                 mykeys = list(real_vartree.dbapi._aux_cache_keys)
1050                 if "_mtime_" not in mykeys:
1051                         mykeys.append("_mtime_")
1052                 self._db_keys = mykeys
1053                 self._pkg_cache = pkg_cache
1054                 self.dbapi = PackageVirtualDbapi(real_vartree.settings)
1055                 vdb_path = os.path.join(self.root, portage.VDB_PATH)
1056                 try:
1057                         # At least the parent needs to exist for the lock file.
1058                         portage.util.ensure_dirs(vdb_path)
1059                 except portage.exception.PortageException:
1060                         pass
1061                 vdb_lock = None
1062                 try:
1063                         if acquire_lock and os.access(vdb_path, os.W_OK):
1064                                 vdb_lock = portage.locks.lockdir(vdb_path)
1065                         real_dbapi = real_vartree.dbapi
1066                         slot_counters = {}
1067                         for cpv in real_dbapi.cpv_all():
1068                                 cache_key = ("installed", self.root, cpv, "nomerge")
1069                                 pkg = self._pkg_cache.get(cache_key)
1070                                 if pkg is not None:
1071                                         metadata = pkg.metadata
1072                                 else:
1073                                         metadata = dict(izip(mykeys, real_dbapi.aux_get(cpv, mykeys)))
1074                                 myslot = metadata["SLOT"]
1075                                 mycp = portage.dep_getkey(cpv)
1076                                 myslot_atom = "%s:%s" % (mycp, myslot)
1077                                 try:
1078                                         mycounter = long(metadata["COUNTER"])
1079                                 except ValueError:
1080                                         mycounter = 0
1081                                         metadata["COUNTER"] = str(mycounter)
1082                                 other_counter = slot_counters.get(myslot_atom, None)
1083                                 if other_counter is not None:
1084                                         if other_counter > mycounter:
1085                                                 continue
1086                                 slot_counters[myslot_atom] = mycounter
1087                                 if pkg is None:
1088                                         pkg = Package(built=True, cpv=cpv,
1089                                                 installed=True, metadata=metadata,
1090                                                 root_config=root_config, type_name="installed")
1091                                 self._pkg_cache[pkg] = pkg
1092                                 self.dbapi.cpv_inject(pkg)
1093                         real_dbapi.flush_cache()
1094                 finally:
1095                         if vdb_lock:
1096                                 portage.locks.unlockdir(vdb_lock)
1097                 # Populate the old-style virtuals using the cached values.
1098                 if not self.settings.treeVirtuals:
1099                         self.settings.treeVirtuals = portage.util.map_dictlist_vals(
1100                                 portage.getCPFromCPV, self.get_all_provides())
1101
1102                 # Intialize variables needed for lazy cache pulls of the live ebuild
1103                 # metadata.  This ensures that the vardb lock is released ASAP, without
1104                 # being delayed in case cache generation is triggered.
1105                 self._aux_get = self.dbapi.aux_get
1106                 self.dbapi.aux_get = self._aux_get_wrapper
1107                 self._match = self.dbapi.match
1108                 self.dbapi.match = self._match_wrapper
1109                 self._aux_get_history = set()
1110                 self._portdb_keys = ["EAPI", "DEPEND", "RDEPEND", "PDEPEND"]
1111                 self._portdb = portdb
1112                 self._global_updates = None
1113
1114         def _match_wrapper(self, cpv, use_cache=1):
1115                 """
1116                 Make sure the metadata in Package instances gets updated for any
1117                 cpv that is returned from a match() call, since the metadata can
1118                 be accessed directly from the Package instance instead of via
1119                 aux_get().
1120                 """
1121                 matches = self._match(cpv, use_cache=use_cache)
1122                 for cpv in matches:
1123                         if cpv in self._aux_get_history:
1124                                 continue
1125                         self._aux_get_wrapper(cpv, [])
1126                 return matches
1127
1128         def _aux_get_wrapper(self, pkg, wants):
1129                 if pkg in self._aux_get_history:
1130                         return self._aux_get(pkg, wants)
1131                 self._aux_get_history.add(pkg)
1132                 try:
1133                         # Use the live ebuild metadata if possible.
1134                         live_metadata = dict(izip(self._portdb_keys,
1135                                 self._portdb.aux_get(pkg, self._portdb_keys)))
1136                         if not portage.eapi_is_supported(live_metadata["EAPI"]):
1137                                 raise KeyError(pkg)
1138                         self.dbapi.aux_update(pkg, live_metadata)
1139                 except (KeyError, portage.exception.PortageException):
1140                         if self._global_updates is None:
1141                                 self._global_updates = \
1142                                         grab_global_updates(self._portdb.porttree_root)
1143                         perform_global_updates(
1144                                 pkg, self.dbapi, self._global_updates)
1145                 return self._aux_get(pkg, wants)
1146
1147         def sync(self, acquire_lock=1):
1148                 """
1149                 Call this method to synchronize state with the real vardb
1150                 after one or more packages may have been installed or
1151                 uninstalled.
1152                 """
1153                 vdb_path = os.path.join(self.root, portage.VDB_PATH)
1154                 try:
1155                         # At least the parent needs to exist for the lock file.
1156                         portage.util.ensure_dirs(vdb_path)
1157                 except portage.exception.PortageException:
1158                         pass
1159                 vdb_lock = None
1160                 try:
1161                         if acquire_lock and os.access(vdb_path, os.W_OK):
1162                                 vdb_lock = portage.locks.lockdir(vdb_path)
1163                         self._sync()
1164                 finally:
1165                         if vdb_lock:
1166                                 portage.locks.unlockdir(vdb_lock)
1167
1168         def _sync(self):
1169
1170                 real_vardb = self._root_config.trees["vartree"].dbapi
1171                 current_cpv_set = frozenset(real_vardb.cpv_all())
1172                 pkg_vardb = self.dbapi
1173                 aux_get_history = self._aux_get_history
1174
1175                 # Remove any packages that have been uninstalled.
1176                 for pkg in list(pkg_vardb):
1177                         if pkg.cpv not in current_cpv_set:
1178                                 pkg_vardb.cpv_remove(pkg)
1179                                 aux_get_history.discard(pkg.cpv)
1180
1181                 # Validate counters and timestamps.
1182                 slot_counters = {}
1183                 root = self.root
1184                 validation_keys = ["COUNTER", "_mtime_"]
1185                 for cpv in current_cpv_set:
1186
1187                         pkg_hash_key = ("installed", root, cpv, "nomerge")
1188                         pkg = pkg_vardb.get(pkg_hash_key)
1189                         if pkg is not None:
1190                                 counter, mtime = real_vardb.aux_get(cpv, validation_keys)
1191                                 try:
1192                                         counter = long(counter)
1193                                 except ValueError:
1194                                         counter = 0
1195
1196                                 if counter != pkg.counter or \
1197                                         mtime != pkg.mtime:
1198                                         pkg_vardb.cpv_remove(pkg)
1199                                         aux_get_history.discard(pkg.cpv)
1200                                         pkg = None
1201
1202                         if pkg is None:
1203                                 pkg = self._pkg(cpv)
1204
1205                         other_counter = slot_counters.get(pkg.slot_atom)
1206                         if other_counter is not None:
1207                                 if other_counter > pkg.counter:
1208                                         continue
1209
1210                         slot_counters[pkg.slot_atom] = pkg.counter
1211                         pkg_vardb.cpv_inject(pkg)
1212
1213                 real_vardb.flush_cache()
1214
1215         def _pkg(self, cpv):
1216                 root_config = self._root_config
1217                 real_vardb = root_config.trees["vartree"].dbapi
1218                 pkg = Package(cpv=cpv, installed=True,
1219                         metadata=izip(self._db_keys,
1220                         real_vardb.aux_get(cpv, self._db_keys)),
1221                         root_config=root_config,
1222                         type_name="installed")
1223
1224                 try:
1225                         mycounter = long(pkg.metadata["COUNTER"])
1226                 except ValueError:
1227                         mycounter = 0
1228                         pkg.metadata["COUNTER"] = str(mycounter)
1229
1230                 return pkg
1231
1232 def grab_global_updates(portdir):
1233         from portage.update import grab_updates, parse_updates
1234         updpath = os.path.join(portdir, "profiles", "updates")
1235         try:
1236                 rawupdates = grab_updates(updpath)
1237         except portage.exception.DirectoryNotFound:
1238                 rawupdates = []
1239         upd_commands = []
1240         for mykey, mystat, mycontent in rawupdates:
1241                 commands, errors = parse_updates(mycontent)
1242                 upd_commands.extend(commands)
1243         return upd_commands
1244
1245 def perform_global_updates(mycpv, mydb, mycommands):
1246         from portage.update import update_dbentries
1247         aux_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
1248         aux_dict = dict(izip(aux_keys, mydb.aux_get(mycpv, aux_keys)))
1249         updates = update_dbentries(mycommands, aux_dict)
1250         if updates:
1251                 mydb.aux_update(mycpv, updates)
1252
1253 def visible(pkgsettings, pkg):
1254         """
1255         Check if a package is visible. This can raise an InvalidDependString
1256         exception if LICENSE is invalid.
1257         TODO: optionally generate a list of masking reasons
1258         @rtype: Boolean
1259         @returns: True if the package is visible, False otherwise.
1260         """
1261         if not pkg.metadata["SLOT"]:
1262                 return False
1263         if pkg.built and not pkg.installed and "CHOST" in pkg.metadata:
1264                 if not pkgsettings._accept_chost(pkg):
1265                         return False
1266         eapi = pkg.metadata["EAPI"]
1267         if not portage.eapi_is_supported(eapi):
1268                 return False
1269         if not pkg.installed:
1270                 if portage._eapi_is_deprecated(eapi):
1271                         return False
1272                 if pkgsettings._getMissingKeywords(pkg.cpv, pkg.metadata):
1273                         return False
1274         if pkgsettings._getMaskAtom(pkg.cpv, pkg.metadata):
1275                 return False
1276         if pkgsettings._getProfileMaskAtom(pkg.cpv, pkg.metadata):
1277                 return False
1278         try:
1279                 if pkgsettings._getMissingLicenses(pkg.cpv, pkg.metadata):
1280                         return False
1281         except portage.exception.InvalidDependString:
1282                 return False
1283         return True
1284
1285 def get_masking_status(pkg, pkgsettings, root_config):
1286
1287         mreasons = portage.getmaskingstatus(
1288                 pkg, settings=pkgsettings,
1289                 portdb=root_config.trees["porttree"].dbapi)
1290
1291         if pkg.built and not pkg.installed and "CHOST" in pkg.metadata:
1292                 if not pkgsettings._accept_chost(pkg):
1293                         mreasons.append("CHOST: %s" % \
1294                                 pkg.metadata["CHOST"])
1295
1296         if not pkg.metadata["SLOT"]:
1297                 mreasons.append("invalid: SLOT is undefined")
1298
1299         return mreasons
1300
1301 def get_mask_info(root_config, cpv, pkgsettings,
1302         db, pkg_type, built, installed, db_keys):
1303         eapi_masked = False
1304         try:
1305                 metadata = dict(izip(db_keys,
1306                         db.aux_get(cpv, db_keys)))
1307         except KeyError:
1308                 metadata = None
1309         if metadata and not built:
1310                 pkgsettings.setcpv(cpv, mydb=metadata)
1311                 metadata["USE"] = pkgsettings["PORTAGE_USE"]
1312         if metadata is None:
1313                 mreasons = ["corruption"]
1314         else:
1315                 pkg = Package(type_name=pkg_type, root_config=root_config,
1316                         cpv=cpv, built=built, installed=installed, metadata=metadata)
1317                 mreasons = get_masking_status(pkg, pkgsettings, root_config)
1318         return metadata, mreasons
1319
1320 def show_masked_packages(masked_packages):
1321         shown_licenses = set()
1322         shown_comments = set()
1323         # Maybe there is both an ebuild and a binary. Only
1324         # show one of them to avoid redundant appearance.
1325         shown_cpvs = set()
1326         have_eapi_mask = False
1327         for (root_config, pkgsettings, cpv,
1328                 metadata, mreasons) in masked_packages:
1329                 if cpv in shown_cpvs:
1330                         continue
1331                 shown_cpvs.add(cpv)
1332                 comment, filename = None, None
1333                 if "package.mask" in mreasons:
1334                         comment, filename = \
1335                                 portage.getmaskingreason(
1336                                 cpv, metadata=metadata,
1337                                 settings=pkgsettings,
1338                                 portdb=root_config.trees["porttree"].dbapi,
1339                                 return_location=True)
1340                 missing_licenses = []
1341                 if metadata:
1342                         if not portage.eapi_is_supported(metadata["EAPI"]):
1343                                 have_eapi_mask = True
1344                         try:
1345                                 missing_licenses = \
1346                                         pkgsettings._getMissingLicenses(
1347                                                 cpv, metadata)
1348                         except portage.exception.InvalidDependString:
1349                                 # This will have already been reported
1350                                 # above via mreasons.
1351                                 pass
1352
1353                 print "- "+cpv+" (masked by: "+", ".join(mreasons)+")"
1354                 if comment and comment not in shown_comments:
1355                         print filename+":"
1356                         print comment
1357                         shown_comments.add(comment)
1358                 portdb = root_config.trees["porttree"].dbapi
1359                 for l in missing_licenses:
1360                         l_path = portdb.findLicensePath(l)
1361                         if l in shown_licenses:
1362                                 continue
1363                         msg = ("A copy of the '%s' license" + \
1364                         " is located at '%s'.") % (l, l_path)
1365                         print msg
1366                         print
1367                         shown_licenses.add(l)
1368         return have_eapi_mask
1369
1370 class Task(SlotObject):
1371         __slots__ = ("_hash_key", "_hash_value")
1372
1373         def _get_hash_key(self):
1374                 hash_key = getattr(self, "_hash_key", None)
1375                 if hash_key is None:
1376                         raise NotImplementedError(self)
1377                 return hash_key
1378
1379         def __eq__(self, other):
1380                 return self._get_hash_key() == other
1381
1382         def __ne__(self, other):
1383                 return self._get_hash_key() != other
1384
1385         def __hash__(self):
1386                 hash_value = getattr(self, "_hash_value", None)
1387                 if hash_value is None:
1388                         self._hash_value = hash(self._get_hash_key())
1389                 return self._hash_value
1390
1391         def __len__(self):
1392                 return len(self._get_hash_key())
1393
1394         def __getitem__(self, key):
1395                 return self._get_hash_key()[key]
1396
1397         def __iter__(self):
1398                 return iter(self._get_hash_key())
1399
1400         def __contains__(self, key):
1401                 return key in self._get_hash_key()
1402
1403         def __str__(self):
1404                 return str(self._get_hash_key())
1405
1406 class Blocker(Task):
1407
1408         __hash__ = Task.__hash__
1409         __slots__ = ("root", "atom", "cp", "eapi", "satisfied")
1410
1411         def __init__(self, **kwargs):
1412                 Task.__init__(self, **kwargs)
1413                 self.cp = portage.dep_getkey(self.atom)
1414
1415         def _get_hash_key(self):
1416                 hash_key = getattr(self, "_hash_key", None)
1417                 if hash_key is None:
1418                         self._hash_key = \
1419                                 ("blocks", self.root, self.atom, self.eapi)
1420                 return self._hash_key
1421
1422 class Package(Task):
1423
1424         __hash__ = Task.__hash__
1425         __slots__ = ("built", "cpv", "depth",
1426                 "installed", "metadata", "onlydeps", "operation",
1427                 "root_config", "type_name",
1428                 "category", "counter", "cp", "cpv_split",
1429                 "inherited", "iuse", "mtime",
1430                 "pf", "pv_split", "root", "slot", "slot_atom", "use")
1431
1432         metadata_keys = [
1433                 "CHOST", "COUNTER", "DEPEND", "EAPI",
1434                 "INHERITED", "IUSE", "KEYWORDS",
1435                 "LICENSE", "PDEPEND", "PROVIDE", "RDEPEND",
1436                 "repository", "PROPERTIES", "RESTRICT", "SLOT", "USE", "_mtime_"]
1437
1438         def __init__(self, **kwargs):
1439                 Task.__init__(self, **kwargs)
1440                 self.root = self.root_config.root
1441                 self.metadata = _PackageMetadataWrapper(self, self.metadata)
1442                 self.cp = portage.cpv_getkey(self.cpv)
1443                 self.slot_atom = portage.dep.Atom("%s:%s" % (self.cp, self.slot))
1444                 self.category, self.pf = portage.catsplit(self.cpv)
1445                 self.cpv_split = portage.catpkgsplit(self.cpv)
1446                 self.pv_split = self.cpv_split[1:]
1447
1448         class _use(object):
1449
1450                 __slots__ = ("__weakref__", "enabled")
1451
1452                 def __init__(self, use):
1453                         self.enabled = frozenset(use)
1454
1455         class _iuse(object):
1456
1457                 __slots__ = ("__weakref__", "all", "enabled", "disabled", "iuse_implicit", "regex", "tokens")
1458
1459                 def __init__(self, tokens, iuse_implicit):
1460                         self.tokens = tuple(tokens)
1461                         self.iuse_implicit = iuse_implicit
1462                         enabled = []
1463                         disabled = []
1464                         other = []
1465                         for x in tokens:
1466                                 prefix = x[:1]
1467                                 if prefix == "+":
1468                                         enabled.append(x[1:])
1469                                 elif prefix == "-":
1470                                         disabled.append(x[1:])
1471                                 else:
1472                                         other.append(x)
1473                         self.enabled = frozenset(enabled)
1474                         self.disabled = frozenset(disabled)
1475                         self.all = frozenset(chain(enabled, disabled, other))
1476
1477                 def __getattribute__(self, name):
1478                         if name == "regex":
1479                                 try:
1480                                         return object.__getattribute__(self, "regex")
1481                                 except AttributeError:
1482                                         all = object.__getattribute__(self, "all")
1483                                         iuse_implicit = object.__getattribute__(self, "iuse_implicit")
1484                                         # Escape anything except ".*" which is supposed
1485                                         # to pass through from _get_implicit_iuse()
1486                                         regex = (re.escape(x) for x in chain(all, iuse_implicit))
1487                                         regex = "^(%s)$" % "|".join(regex)
1488                                         regex = regex.replace("\\.\\*", ".*")
1489                                         self.regex = re.compile(regex)
1490                         return object.__getattribute__(self, name)
1491
1492         def _get_hash_key(self):
1493                 hash_key = getattr(self, "_hash_key", None)
1494                 if hash_key is None:
1495                         if self.operation is None:
1496                                 self.operation = "merge"
1497                                 if self.onlydeps or self.installed:
1498                                         self.operation = "nomerge"
1499                         self._hash_key = \
1500                                 (self.type_name, self.root, self.cpv, self.operation)
1501                 return self._hash_key
1502
1503         def __lt__(self, other):
1504                 if other.cp != self.cp:
1505                         return False
1506                 if portage.pkgcmp(self.pv_split, other.pv_split) < 0:
1507                         return True
1508                 return False
1509
1510         def __le__(self, other):
1511                 if other.cp != self.cp:
1512                         return False
1513                 if portage.pkgcmp(self.pv_split, other.pv_split) <= 0:
1514                         return True
1515                 return False
1516
1517         def __gt__(self, other):
1518                 if other.cp != self.cp:
1519                         return False
1520                 if portage.pkgcmp(self.pv_split, other.pv_split) > 0:
1521                         return True
1522                 return False
1523
1524         def __ge__(self, other):
1525                 if other.cp != self.cp:
1526                         return False
1527                 if portage.pkgcmp(self.pv_split, other.pv_split) >= 0:
1528                         return True
1529                 return False
1530
1531 _all_metadata_keys = set(x for x in portage.auxdbkeys \
1532         if not x.startswith("UNUSED_"))
1533 _all_metadata_keys.discard("CDEPEND")
1534 _all_metadata_keys.update(Package.metadata_keys)
1535
1536 from portage.cache.mappings import slot_dict_class
1537 _PackageMetadataWrapperBase = slot_dict_class(_all_metadata_keys)
1538
1539 class _PackageMetadataWrapper(_PackageMetadataWrapperBase):
1540         """
1541         Detect metadata updates and synchronize Package attributes.
1542         """
1543
1544         __slots__ = ("_pkg",)
1545         _wrapped_keys = frozenset(
1546                 ["COUNTER", "INHERITED", "IUSE", "SLOT", "USE", "_mtime_"])
1547
1548         def __init__(self, pkg, metadata):
1549                 _PackageMetadataWrapperBase.__init__(self)
1550                 self._pkg = pkg
1551                 self.update(metadata)
1552
1553         def __setitem__(self, k, v):
1554                 _PackageMetadataWrapperBase.__setitem__(self, k, v)
1555                 if k in self._wrapped_keys:
1556                         getattr(self, "_set_" + k.lower())(k, v)
1557
1558         def _set_inherited(self, k, v):
1559                 if isinstance(v, basestring):
1560                         v = frozenset(v.split())
1561                 self._pkg.inherited = v
1562
1563         def _set_iuse(self, k, v):
1564                 self._pkg.iuse = self._pkg._iuse(
1565                         v.split(), self._pkg.root_config.iuse_implicit)
1566
1567         def _set_slot(self, k, v):
1568                 self._pkg.slot = v
1569
1570         def _set_use(self, k, v):
1571                 self._pkg.use = self._pkg._use(v.split())
1572
1573         def _set_counter(self, k, v):
1574                 if isinstance(v, basestring):
1575                         try:
1576                                 v = long(v.strip())
1577                         except ValueError:
1578                                 v = 0
1579                 self._pkg.counter = v
1580
1581         def _set__mtime_(self, k, v):
1582                 if isinstance(v, basestring):
1583                         try:
1584                                 v = long(v.strip())
1585                         except ValueError:
1586                                 v = 0
1587                 self._pkg.mtime = v
1588
1589 class EbuildFetchonly(SlotObject):
1590
1591         __slots__ = ("fetch_all", "pkg", "pretend", "settings")
1592
1593         def execute(self):
1594                 # To spawn pkg_nofetch requires PORTAGE_BUILDDIR for
1595                 # ensuring sane $PWD (bug #239560) and storing elog
1596                 # messages. Use a private temp directory, in order
1597                 # to avoid locking the main one.
1598                 settings = self.settings
1599                 global_tmpdir = settings["PORTAGE_TMPDIR"]
1600                 from tempfile import mkdtemp
1601                 try:
1602                         private_tmpdir = mkdtemp("", "._portage_fetch_.", global_tmpdir)
1603                 except OSError, e:
1604                         if e.errno != portage.exception.PermissionDenied.errno:
1605                                 raise
1606                         raise portage.exception.PermissionDenied(global_tmpdir)
1607                 settings["PORTAGE_TMPDIR"] = private_tmpdir
1608                 settings.backup_changes("PORTAGE_TMPDIR")
1609                 try:
1610                         retval = self._execute()
1611                 finally:
1612                         settings["PORTAGE_TMPDIR"] = global_tmpdir
1613                         settings.backup_changes("PORTAGE_TMPDIR")
1614                         shutil.rmtree(private_tmpdir)
1615                 return retval
1616
1617         def _execute(self):
1618                 settings = self.settings
1619                 pkg = self.pkg
1620                 root_config = pkg.root_config
1621                 portdb = root_config.trees["porttree"].dbapi
1622                 ebuild_path = portdb.findname(pkg.cpv)
1623                 settings.setcpv(pkg)
1624                 debug = settings.get("PORTAGE_DEBUG") == "1"
1625                 use_cache = 1 # always true
1626                 portage.doebuild_environment(ebuild_path, "fetch",
1627                         root_config.root, settings, debug, use_cache, portdb)
1628                 portage.prepare_build_dirs(self.pkg.root, self.settings, 0)
1629
1630                 retval = portage.doebuild(ebuild_path, "fetch",
1631                         self.settings["ROOT"], self.settings, debug=debug,
1632                         listonly=self.pretend, fetchonly=1, fetchall=self.fetch_all,
1633                         mydbapi=portdb, tree="porttree")
1634
1635                 if retval != os.EX_OK:
1636                         msg = "Fetch failed for '%s'" % (pkg.cpv,)
1637                         eerror(msg, phase="unpack", key=pkg.cpv)
1638
1639                 portage.elog.elog_process(self.pkg.cpv, self.settings)
1640                 return retval
1641
1642 class PollConstants(object):
1643
1644         """
1645         Provides POLL* constants that are equivalent to those from the
1646         select module, for use by PollSelectAdapter.
1647         """
1648
1649         names = ("POLLIN", "POLLPRI", "POLLOUT", "POLLERR", "POLLHUP", "POLLNVAL")
1650         v = 1
1651         for k in names:
1652                 locals()[k] = getattr(select, k, v)
1653                 v *= 2
1654         del k, v
1655
1656 class AsynchronousTask(SlotObject):
1657         """
1658         Subclasses override _wait() and _poll() so that calls
1659         to public methods can be wrapped for implementing
1660         hooks such as exit listener notification.
1661
1662         Sublasses should call self.wait() to notify exit listeners after
1663         the task is complete and self.returncode has been set.
1664         """
1665
1666         __slots__ = ("background", "cancelled", "returncode") + \
1667                 ("_exit_listeners", "_exit_listener_stack", "_start_listeners")
1668
1669         def start(self):
1670                 """
1671                 Start an asynchronous task and then return as soon as possible.
1672                 """
1673                 self._start()
1674                 self._start_hook()
1675
1676         def _start(self):
1677                 raise NotImplementedError(self)
1678
1679         def isAlive(self):
1680                 return self.returncode is None
1681
1682         def poll(self):
1683                 self._wait_hook()
1684                 return self._poll()
1685
1686         def _poll(self):
1687                 return self.returncode
1688
1689         def wait(self):
1690                 if self.returncode is None:
1691                         self._wait()
1692                 self._wait_hook()
1693                 return self.returncode
1694
1695         def _wait(self):
1696                 return self.returncode
1697
1698         def cancel(self):
1699                 self.cancelled = True
1700                 self.wait()
1701
1702         def addStartListener(self, f):
1703                 """
1704                 The function will be called with one argument, a reference to self.
1705                 """
1706                 if self._start_listeners is None:
1707                         self._start_listeners = []
1708                 self._start_listeners.append(f)
1709
1710         def removeStartListener(self, f):
1711                 if self._start_listeners is None:
1712                         return
1713                 self._start_listeners.remove(f)
1714
1715         def _start_hook(self):
1716                 if self._start_listeners is not None:
1717                         start_listeners = self._start_listeners
1718                         self._start_listeners = None
1719
1720                         for f in start_listeners:
1721                                 f(self)
1722
1723         def addExitListener(self, f):
1724                 """
1725                 The function will be called with one argument, a reference to self.
1726                 """
1727                 if self._exit_listeners is None:
1728                         self._exit_listeners = []
1729                 self._exit_listeners.append(f)
1730
1731         def removeExitListener(self, f):
1732                 if self._exit_listeners is None:
1733                         if self._exit_listener_stack is not None:
1734                                 self._exit_listener_stack.remove(f)
1735                         return
1736                 self._exit_listeners.remove(f)
1737
1738         def _wait_hook(self):
1739                 """
1740                 Call this method after the task completes, just before returning
1741                 the returncode from wait() or poll(). This hook is
1742                 used to trigger exit listeners when the returncode first
1743                 becomes available.
1744                 """
1745                 if self.returncode is not None and \
1746                         self._exit_listeners is not None:
1747
1748                         # This prevents recursion, in case one of the
1749                         # exit handlers triggers this method again by
1750                         # calling wait(). Use a stack that gives
1751                         # removeExitListener() an opportunity to consume
1752                         # listeners from the stack, before they can get
1753                         # called below. This is necessary because a call
1754                         # to one exit listener may result in a call to
1755                         # removeExitListener() for another listener on
1756                         # the stack. That listener needs to be removed
1757                         # from the stack since it would be inconsistent
1758                         # to call it after it has been been passed into
1759                         # removeExitListener().
1760                         self._exit_listener_stack = self._exit_listeners
1761                         self._exit_listeners = None
1762
1763                         self._exit_listener_stack.reverse()
1764                         while self._exit_listener_stack:
1765                                 self._exit_listener_stack.pop()(self)
1766
1767 class AbstractPollTask(AsynchronousTask):
1768
1769         __slots__ = ("scheduler",) + \
1770                 ("_registered",)
1771
1772         _bufsize = 4096
1773         _exceptional_events = PollConstants.POLLERR | PollConstants.POLLNVAL
1774         _registered_events = PollConstants.POLLIN | PollConstants.POLLHUP | \
1775                 _exceptional_events
1776
1777         def _unregister(self):
1778                 raise NotImplementedError(self)
1779
1780         def _unregister_if_appropriate(self, event):
1781                 if self._registered:
1782                         if event & self._exceptional_events:
1783                                 self._unregister()
1784                                 self.cancel()
1785                         elif event & PollConstants.POLLHUP:
1786                                 self._unregister()
1787                                 self.wait()
1788
1789 class PipeReader(AbstractPollTask):
1790
1791         """
1792         Reads output from one or more files and saves it in memory,
1793         for retrieval via the getvalue() method. This is driven by
1794         the scheduler's poll() loop, so it runs entirely within the
1795         current process.
1796         """
1797
1798         __slots__ = ("input_files",) + \
1799                 ("_read_data", "_reg_ids")
1800
1801         def _start(self):
1802                 self._reg_ids = set()
1803                 self._read_data = []
1804                 for k, f in self.input_files.iteritems():
1805                         fcntl.fcntl(f.fileno(), fcntl.F_SETFL,
1806                                 fcntl.fcntl(f.fileno(), fcntl.F_GETFL) | os.O_NONBLOCK)
1807                         self._reg_ids.add(self.scheduler.register(f.fileno(),
1808                                 self._registered_events, self._output_handler))
1809                 self._registered = True
1810
1811         def isAlive(self):
1812                 return self._registered
1813
1814         def cancel(self):
1815                 if self.returncode is None:
1816                         self.returncode = 1
1817                         self.cancelled = True
1818                 self.wait()
1819
1820         def _wait(self):
1821                 if self.returncode is not None:
1822                         return self.returncode
1823
1824                 if self._registered:
1825                         self.scheduler.schedule(self._reg_ids)
1826                         self._unregister()
1827
1828                 self.returncode = os.EX_OK
1829                 return self.returncode
1830
1831         def getvalue(self):
1832                 """Retrieve the entire contents"""
1833                 return "".join(self._read_data)
1834
1835         def close(self):
1836                 """Free the memory buffer."""
1837                 self._read_data = None
1838
1839         def _output_handler(self, fd, event):
1840
1841                 if event & PollConstants.POLLIN:
1842
1843                         for f in self.input_files.itervalues():
1844                                 if fd == f.fileno():
1845                                         break
1846
1847                         buf = array.array('B')
1848                         try:
1849                                 buf.fromfile(f, self._bufsize)
1850                         except EOFError:
1851                                 pass
1852
1853                         if buf:
1854                                 self._read_data.append(buf.tostring())
1855                         else:
1856                                 self._unregister()
1857                                 self.wait()
1858
1859                 self._unregister_if_appropriate(event)
1860                 return self._registered
1861
1862         def _unregister(self):
1863                 """
1864                 Unregister from the scheduler and close open files.
1865                 """
1866
1867                 self._registered = False
1868
1869                 if self._reg_ids is not None:
1870                         for reg_id in self._reg_ids:
1871                                 self.scheduler.unregister(reg_id)
1872                         self._reg_ids = None
1873
1874                 if self.input_files is not None:
1875                         for f in self.input_files.itervalues():
1876                                 f.close()
1877                         self.input_files = None
1878
1879 class CompositeTask(AsynchronousTask):
1880
1881         __slots__ = ("scheduler",) + ("_current_task",)
1882
1883         def isAlive(self):
1884                 return self._current_task is not None
1885
1886         def cancel(self):
1887                 self.cancelled = True
1888                 if self._current_task is not None:
1889                         self._current_task.cancel()
1890
1891         def _poll(self):
1892                 """
1893                 This does a loop calling self._current_task.poll()
1894                 repeatedly as long as the value of self._current_task
1895                 keeps changing. It calls poll() a maximum of one time
1896                 for a given self._current_task instance. This is useful
1897                 since calling poll() on a task can trigger advance to
1898                 the next task could eventually lead to the returncode
1899                 being set in cases when polling only a single task would
1900                 not have the same effect.
1901                 """
1902
1903                 prev = None
1904                 while True:
1905                         task = self._current_task
1906                         if task is None or task is prev:
1907                                 # don't poll the same task more than once
1908                                 break
1909                         task.poll()
1910                         prev = task
1911
1912                 return self.returncode
1913
1914         def _wait(self):
1915
1916                 prev = None
1917                 while True:
1918                         task = self._current_task
1919                         if task is None:
1920                                 # don't wait for the same task more than once
1921                                 break
1922                         if task is prev:
1923                                 # Before the task.wait() method returned, an exit
1924                                 # listener should have set self._current_task to either
1925                                 # a different task or None. Something is wrong.
1926                                 raise AssertionError("self._current_task has not " + \
1927                                         "changed since calling wait", self, task)
1928                         task.wait()
1929                         prev = task
1930
1931                 return self.returncode
1932
1933         def _assert_current(self, task):
1934                 """
1935                 Raises an AssertionError if the given task is not the
1936                 same one as self._current_task. This can be useful
1937                 for detecting bugs.
1938                 """
1939                 if task is not self._current_task:
1940                         raise AssertionError("Unrecognized task: %s" % (task,))
1941
1942         def _default_exit(self, task):
1943                 """
1944                 Calls _assert_current() on the given task and then sets the
1945                 composite returncode attribute if task.returncode != os.EX_OK.
1946                 If the task failed then self._current_task will be set to None.
1947                 Subclasses can use this as a generic task exit callback.
1948
1949                 @rtype: int
1950                 @returns: The task.returncode attribute.
1951                 """
1952                 self._assert_current(task)
1953                 if task.returncode != os.EX_OK:
1954                         self.returncode = task.returncode
1955                         self._current_task = None
1956                 return task.returncode
1957
1958         def _final_exit(self, task):
1959                 """
1960                 Assumes that task is the final task of this composite task.
1961                 Calls _default_exit() and sets self.returncode to the task's
1962                 returncode and sets self._current_task to None.
1963                 """
1964                 self._default_exit(task)
1965                 self._current_task = None
1966                 self.returncode = task.returncode
1967                 return self.returncode
1968
1969         def _default_final_exit(self, task):
1970                 """
1971                 This calls _final_exit() and then wait().
1972
1973                 Subclasses can use this as a generic final task exit callback.
1974
1975                 """
1976                 self._final_exit(task)
1977                 return self.wait()
1978
1979         def _start_task(self, task, exit_handler):
1980                 """
1981                 Register exit handler for the given task, set it
1982                 as self._current_task, and call task.start().
1983
1984                 Subclasses can use this as a generic way to start
1985                 a task.
1986
1987                 """
1988                 task.addExitListener(exit_handler)
1989                 self._current_task = task
1990                 task.start()
1991
1992 class TaskSequence(CompositeTask):
1993         """
1994         A collection of tasks that executes sequentially. Each task
1995         must have a addExitListener() method that can be used as
1996         a means to trigger movement from one task to the next.
1997         """
1998
1999         __slots__ = ("_task_queue",)
2000
2001         def __init__(self, **kwargs):
2002                 AsynchronousTask.__init__(self, **kwargs)
2003                 self._task_queue = deque()
2004
2005         def add(self, task):
2006                 self._task_queue.append(task)
2007
2008         def _start(self):
2009                 self._start_next_task()
2010
2011         def cancel(self):
2012                 self._task_queue.clear()
2013                 CompositeTask.cancel(self)
2014
2015         def _start_next_task(self):
2016                 self._start_task(self._task_queue.popleft(),
2017                         self._task_exit_handler)
2018
2019         def _task_exit_handler(self, task):
2020                 if self._default_exit(task) != os.EX_OK:
2021                         self.wait()
2022                 elif self._task_queue:
2023                         self._start_next_task()
2024                 else:
2025                         self._final_exit(task)
2026                         self.wait()
2027
2028 class SubProcess(AbstractPollTask):
2029
2030         __slots__ = ("pid",) + \
2031                 ("_files", "_reg_id")
2032
2033         # A file descriptor is required for the scheduler to monitor changes from
2034         # inside a poll() loop. When logging is not enabled, create a pipe just to
2035         # serve this purpose alone.
2036         _dummy_pipe_fd = 9
2037
2038         def _poll(self):
2039                 if self.returncode is not None:
2040                         return self.returncode
2041                 if self.pid is None:
2042                         return self.returncode
2043                 if self._registered:
2044                         return self.returncode
2045
2046                 try:
2047                         retval = os.waitpid(self.pid, os.WNOHANG)
2048                 except OSError, e:
2049                         if e.errno != errno.ECHILD:
2050                                 raise
2051                         del e
2052                         retval = (self.pid, 1)
2053
2054                 if retval == (0, 0):
2055                         return None
2056                 self._set_returncode(retval)
2057                 return self.returncode
2058
2059         def cancel(self):
2060                 if self.isAlive():
2061                         try:
2062                                 os.kill(self.pid, signal.SIGTERM)
2063                         except OSError, e:
2064                                 if e.errno != errno.ESRCH:
2065                                         raise
2066                                 del e
2067
2068                 self.cancelled = True
2069                 if self.pid is not None:
2070                         self.wait()
2071                 return self.returncode
2072
2073         def isAlive(self):
2074                 return self.pid is not None and \
2075                         self.returncode is None
2076
2077         def _wait(self):
2078
2079                 if self.returncode is not None:
2080                         return self.returncode
2081
2082                 if self._registered:
2083                         self.scheduler.schedule(self._reg_id)
2084                         self._unregister()
2085                         if self.returncode is not None:
2086                                 return self.returncode
2087
2088                 try:
2089                         wait_retval = os.waitpid(self.pid, 0)
2090                 except OSError, e:
2091                         if e.errno != errno.ECHILD:
2092                                 raise
2093                         del e
2094                         self._set_returncode((self.pid, 1))
2095                 else:
2096                         self._set_returncode(wait_retval)
2097
2098                 return self.returncode
2099
2100         def _unregister(self):
2101                 """
2102                 Unregister from the scheduler and close open files.
2103                 """
2104
2105                 self._registered = False
2106
2107                 if self._reg_id is not None:
2108                         self.scheduler.unregister(self._reg_id)
2109                         self._reg_id = None
2110
2111                 if self._files is not None:
2112                         for f in self._files.itervalues():
2113                                 f.close()
2114                         self._files = None
2115
2116         def _set_returncode(self, wait_retval):
2117
2118                 retval = wait_retval[1]
2119
2120                 if retval != os.EX_OK:
2121                         if retval & 0xff:
2122                                 retval = (retval & 0xff) << 8
2123                         else:
2124                                 retval = retval >> 8
2125
2126                 self.returncode = retval
2127
2128 class SpawnProcess(SubProcess):
2129
2130         """
2131         Constructor keyword args are passed into portage.process.spawn().
2132         The required "args" keyword argument will be passed as the first
2133         spawn() argument.
2134         """
2135
2136         _spawn_kwarg_names = ("env", "opt_name", "fd_pipes",
2137                 "uid", "gid", "groups", "umask", "logfile",
2138                 "path_lookup", "pre_exec")
2139
2140         __slots__ = ("args",) + \
2141                 _spawn_kwarg_names
2142
2143         _file_names = ("log", "process", "stdout")
2144         _files_dict = slot_dict_class(_file_names, prefix="")
2145
2146         def _start(self):
2147
2148                 if self.cancelled:
2149                         return
2150
2151                 if self.fd_pipes is None:
2152                         self.fd_pipes = {}
2153                 fd_pipes = self.fd_pipes
2154                 fd_pipes.setdefault(0, sys.stdin.fileno())
2155                 fd_pipes.setdefault(1, sys.stdout.fileno())
2156                 fd_pipes.setdefault(2, sys.stderr.fileno())
2157
2158                 # flush any pending output
2159                 for fd in fd_pipes.itervalues():
2160                         if fd == sys.stdout.fileno():
2161                                 sys.stdout.flush()
2162                         if fd == sys.stderr.fileno():
2163                                 sys.stderr.flush()
2164
2165                 logfile = self.logfile
2166                 self._files = self._files_dict()
2167                 files = self._files
2168
2169                 master_fd, slave_fd = self._pipe(fd_pipes)
2170                 fcntl.fcntl(master_fd, fcntl.F_SETFL,
2171                         fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
2172
2173                 null_input = None
2174                 fd_pipes_orig = fd_pipes.copy()
2175                 if self.background:
2176                         # TODO: Use job control functions like tcsetpgrp() to control
2177                         # access to stdin. Until then, use /dev/null so that any
2178                         # attempts to read from stdin will immediately return EOF
2179                         # instead of blocking indefinitely.
2180                         null_input = open('/dev/null', 'rb')
2181                         fd_pipes[0] = null_input.fileno()
2182                 else:
2183                         fd_pipes[0] = fd_pipes_orig[0]
2184
2185                 files.process = os.fdopen(master_fd, 'r')
2186                 if logfile is not None:
2187
2188                         fd_pipes[1] = slave_fd
2189                         fd_pipes[2] = slave_fd
2190
2191                         files.log = open(logfile, "a")
2192                         portage.util.apply_secpass_permissions(logfile,
2193                                 uid=portage.portage_uid, gid=portage.portage_gid,
2194                                 mode=0660)
2195
2196                         if not self.background:
2197                                 files.stdout = os.fdopen(os.dup(fd_pipes_orig[1]), 'w')
2198
2199                         output_handler = self._output_handler
2200
2201                 else:
2202
2203                         # Create a dummy pipe so the scheduler can monitor
2204                         # the process from inside a poll() loop.
2205                         fd_pipes[self._dummy_pipe_fd] = slave_fd
2206                         if self.background:
2207                                 fd_pipes[1] = slave_fd
2208                                 fd_pipes[2] = slave_fd
2209                         output_handler = self._dummy_handler
2210
2211                 kwargs = {}
2212                 for k in self._spawn_kwarg_names:
2213                         v = getattr(self, k)
2214                         if v is not None:
2215                                 kwargs[k] = v
2216
2217                 kwargs["fd_pipes"] = fd_pipes
2218                 kwargs["returnpid"] = True
2219                 kwargs.pop("logfile", None)
2220
2221                 self._reg_id = self.scheduler.register(files.process.fileno(),
2222                         self._registered_events, output_handler)
2223                 self._registered = True
2224
2225                 retval = self._spawn(self.args, **kwargs)
2226
2227                 os.close(slave_fd)
2228                 if null_input is not None:
2229                         null_input.close()
2230
2231                 if isinstance(retval, int):
2232                         # spawn failed
2233                         self._unregister()
2234                         self.returncode = retval
2235                         self.wait()
2236                         return
2237
2238                 self.pid = retval[0]
2239                 portage.process.spawned_pids.remove(self.pid)
2240
2241         def _pipe(self, fd_pipes):
2242                 """
2243                 @type fd_pipes: dict
2244                 @param fd_pipes: pipes from which to copy terminal size if desired.
2245                 """
2246                 return os.pipe()
2247
2248         def _spawn(self, args, **kwargs):
2249                 return portage.process.spawn(args, **kwargs)
2250
2251         def _output_handler(self, fd, event):
2252
2253                 if event & PollConstants.POLLIN:
2254
2255                         files = self._files
2256                         buf = array.array('B')
2257                         try:
2258                                 buf.fromfile(files.process, self._bufsize)
2259                         except EOFError:
2260                                 pass
2261
2262                         if buf:
2263                                 if not self.background:
2264                                         buf.tofile(files.stdout)
2265                                         files.stdout.flush()
2266                                 buf.tofile(files.log)
2267                                 files.log.flush()
2268                         else:
2269                                 self._unregister()
2270                                 self.wait()
2271
2272                 self._unregister_if_appropriate(event)
2273                 return self._registered
2274
2275         def _dummy_handler(self, fd, event):
2276                 """
2277                 This method is mainly interested in detecting EOF, since
2278                 the only purpose of the pipe is to allow the scheduler to
2279                 monitor the process from inside a poll() loop.
2280                 """
2281
2282                 if event & PollConstants.POLLIN:
2283
2284                         buf = array.array('B')
2285                         try:
2286                                 buf.fromfile(self._files.process, self._bufsize)
2287                         except EOFError:
2288                                 pass
2289
2290                         if buf:
2291                                 pass
2292                         else:
2293                                 self._unregister()
2294                                 self.wait()
2295
2296                 self._unregister_if_appropriate(event)
2297                 return self._registered
2298
2299 class MiscFunctionsProcess(SpawnProcess):
2300         """
2301         Spawns misc-functions.sh with an existing ebuild environment.
2302         """
2303
2304         __slots__ = ("commands", "phase", "pkg", "settings")
2305
2306         def _start(self):
2307                 settings = self.settings
2308                 settings.pop("EBUILD_PHASE", None)
2309                 portage_bin_path = settings["PORTAGE_BIN_PATH"]
2310                 misc_sh_binary = os.path.join(portage_bin_path,
2311                         os.path.basename(portage.const.MISC_SH_BINARY))
2312
2313                 self.args = [portage._shell_quote(misc_sh_binary)] + self.commands
2314                 self.logfile = settings.get("PORTAGE_LOG_FILE")
2315
2316                 portage._doebuild_exit_status_unlink(
2317                         settings.get("EBUILD_EXIT_STATUS_FILE"))
2318
2319                 SpawnProcess._start(self)
2320
2321         def _spawn(self, args, **kwargs):
2322                 settings = self.settings
2323                 debug = settings.get("PORTAGE_DEBUG") == "1"
2324                 return portage.spawn(" ".join(args), settings,
2325                         debug=debug, **kwargs)
2326
2327         def _set_returncode(self, wait_retval):
2328                 SpawnProcess._set_returncode(self, wait_retval)
2329                 self.returncode = portage._doebuild_exit_status_check_and_log(
2330                         self.settings, self.phase, self.returncode)
2331
2332 class EbuildFetcher(SpawnProcess):
2333
2334         __slots__ = ("config_pool", "fetchonly", "fetchall", "pkg", "prefetch") + \
2335                 ("_build_dir",)
2336
2337         def _start(self):
2338
2339                 root_config = self.pkg.root_config
2340                 portdb = root_config.trees["porttree"].dbapi
2341                 ebuild_path = portdb.findname(self.pkg.cpv)
2342                 settings = self.config_pool.allocate()
2343                 self._build_dir = EbuildBuildDir(pkg=self.pkg, settings=settings)
2344                 self._build_dir.lock()
2345                 self._build_dir.clean()
2346                 portage.prepare_build_dirs(self.pkg.root, self._build_dir.settings, 0)
2347                 if self.logfile is None:
2348                         self.logfile = settings.get("PORTAGE_LOG_FILE")
2349
2350                 phase = "fetch"
2351                 if self.fetchall:
2352                         phase = "fetchall"
2353
2354                 # If any incremental variables have been overridden
2355                 # via the environment, those values need to be passed
2356                 # along here so that they are correctly considered by
2357                 # the config instance in the subproccess.
2358                 fetch_env = os.environ.copy()
2359
2360                 fetch_env["PORTAGE_NICENESS"] = "0"
2361                 if self.prefetch:
2362                         fetch_env["PORTAGE_PARALLEL_FETCHONLY"] = "1"
2363
2364                 ebuild_binary = os.path.join(
2365                         settings["PORTAGE_BIN_PATH"], "ebuild")
2366
2367                 fetch_args = [ebuild_binary, ebuild_path, phase]
2368                 debug = settings.get("PORTAGE_DEBUG") == "1"
2369                 if debug:
2370                         fetch_args.append("--debug")
2371
2372                 self.args = fetch_args
2373                 self.env = fetch_env
2374                 SpawnProcess._start(self)
2375
2376         def _pipe(self, fd_pipes):
2377                 """When appropriate, use a pty so that fetcher progress bars,
2378                 like wget has, will work properly."""
2379                 if self.background or not sys.stdout.isatty():
2380                         # When the output only goes to a log file,
2381                         # there's no point in creating a pty.
2382                         return os.pipe()
2383                 stdout_pipe = fd_pipes.get(1)
2384                 got_pty, master_fd, slave_fd = \
2385                         portage._create_pty_or_pipe(copy_term_size=stdout_pipe)
2386                 return (master_fd, slave_fd)
2387
2388         def _set_returncode(self, wait_retval):
2389                 SpawnProcess._set_returncode(self, wait_retval)
2390                 # Collect elog messages that might have been
2391                 # created by the pkg_nofetch phase.
2392                 if self._build_dir is not None:
2393                         # Skip elog messages for prefetch, in order to avoid duplicates.
2394                         if not self.prefetch and self.returncode != os.EX_OK:
2395                                 elog_out = None
2396                                 if self.logfile is not None:
2397                                         if self.background:
2398                                                 elog_out = open(self.logfile, 'a')
2399                                 msg = "Fetch failed for '%s'" % (self.pkg.cpv,)
2400                                 if self.logfile is not None:
2401                                         msg += ", Log file:"
2402                                 eerror(msg, phase="unpack", key=self.pkg.cpv, out=elog_out)
2403                                 if self.logfile is not None:
2404                                         eerror(" '%s'" % (self.logfile,),
2405                                                 phase="unpack", key=self.pkg.cpv, out=elog_out)
2406                                 if elog_out is not None:
2407                                         elog_out.close()
2408                         if not self.prefetch:
2409                                 portage.elog.elog_process(self.pkg.cpv, self._build_dir.settings)
2410                         features = self._build_dir.settings.features
2411                         if self.returncode == os.EX_OK:
2412                                 self._build_dir.clean()
2413                         self._build_dir.unlock()
2414                         self.config_pool.deallocate(self._build_dir.settings)
2415                         self._build_dir = None
2416
2417 class EbuildBuildDir(SlotObject):
2418
2419         __slots__ = ("dir_path", "pkg", "settings",
2420                 "locked", "_catdir", "_lock_obj")
2421
2422         def __init__(self, **kwargs):
2423                 SlotObject.__init__(self, **kwargs)
2424                 self.locked = False
2425
2426         def lock(self):
2427                 """
2428                 This raises an AlreadyLocked exception if lock() is called
2429                 while a lock is already held. In order to avoid this, call
2430                 unlock() or check whether the "locked" attribute is True
2431                 or False before calling lock().
2432                 """
2433                 if self._lock_obj is not None:
2434                         raise self.AlreadyLocked((self._lock_obj,))
2435
2436                 dir_path = self.dir_path
2437                 if dir_path is None:
2438                         root_config = self.pkg.root_config
2439                         portdb = root_config.trees["porttree"].dbapi
2440                         ebuild_path = portdb.findname(self.pkg.cpv)
2441                         settings = self.settings
2442                         settings.setcpv(self.pkg)
2443                         debug = settings.get("PORTAGE_DEBUG") == "1"
2444                         use_cache = 1 # always true
2445                         portage.doebuild_environment(ebuild_path, "setup", root_config.root,
2446                                 self.settings, debug, use_cache, portdb)
2447                         dir_path = self.settings["PORTAGE_BUILDDIR"]
2448
2449                 catdir = os.path.dirname(dir_path)
2450                 self._catdir = catdir
2451
2452                 portage.util.ensure_dirs(os.path.dirname(catdir),
2453                         gid=portage.portage_gid,
2454                         mode=070, mask=0)
2455                 catdir_lock = None
2456                 try:
2457                         catdir_lock = portage.locks.lockdir(catdir)
2458                         portage.util.ensure_dirs(catdir,
2459                                 gid=portage.portage_gid,
2460                                 mode=070, mask=0)
2461                         self._lock_obj = portage.locks.lockdir(dir_path)
2462                 finally:
2463                         self.locked = self._lock_obj is not None
2464                         if catdir_lock is not None:
2465                                 portage.locks.unlockdir(catdir_lock)
2466
2467         def clean(self):
2468                 """Uses shutil.rmtree() rather than spawning a 'clean' phase. Disabled
2469                 by keepwork or keeptemp in FEATURES."""
2470                 settings = self.settings
2471                 features = settings.features
2472                 if not ("keepwork" in features or "keeptemp" in features):
2473                         try:
2474                                 shutil.rmtree(settings["PORTAGE_BUILDDIR"])
2475                         except EnvironmentError, e:
2476                                 if e.errno != errno.ENOENT:
2477                                         raise
2478                                 del e
2479
2480         def unlock(self):
2481                 if self._lock_obj is None:
2482                         return
2483
2484                 portage.locks.unlockdir(self._lock_obj)
2485                 self._lock_obj = None
2486                 self.locked = False
2487
2488                 catdir = self._catdir
2489                 catdir_lock = None
2490                 try:
2491                         catdir_lock = portage.locks.lockdir(catdir)
2492                 finally:
2493                         if catdir_lock:
2494                                 try:
2495                                         os.rmdir(catdir)
2496                                 except OSError, e:
2497                                         if e.errno not in (errno.ENOENT,
2498                                                 errno.ENOTEMPTY, errno.EEXIST):
2499                                                 raise
2500                                         del e
2501                                 portage.locks.unlockdir(catdir_lock)
2502
2503         class AlreadyLocked(portage.exception.PortageException):
2504                 pass
2505
2506 class EbuildBuild(CompositeTask):
2507
2508         __slots__ = ("args_set", "config_pool", "find_blockers",
2509                 "ldpath_mtimes", "logger", "opts", "pkg", "pkg_count",
2510                 "prefetcher", "settings", "world_atom") + \
2511                 ("_build_dir", "_buildpkg", "_ebuild_path", "_issyspkg", "_tree")
2512
2513         def _start(self):
2514
2515                 logger = self.logger
2516                 opts = self.opts
2517                 pkg = self.pkg
2518                 settings = self.settings
2519                 world_atom = self.world_atom
2520                 root_config = pkg.root_config
2521                 tree = "porttree"
2522                 self._tree = tree
2523                 portdb = root_config.trees[tree].dbapi
2524                 settings.setcpv(pkg)
2525                 settings.configdict["pkg"]["EMERGE_FROM"] = pkg.type_name
2526                 ebuild_path = portdb.findname(self.pkg.cpv)
2527                 self._ebuild_path = ebuild_path
2528
2529                 prefetcher = self.prefetcher
2530                 if prefetcher is None:
2531                         pass
2532                 elif not prefetcher.isAlive():
2533                         prefetcher.cancel()
2534                 elif prefetcher.poll() is None:
2535
2536                         waiting_msg = "Fetching files " + \
2537                                 "in the background. " + \
2538                                 "To view fetch progress, run `tail -f " + \
2539                                 "/var/log/emerge-fetch.log` in another " + \
2540                                 "terminal."
2541                         msg_prefix = colorize("GOOD", " * ")
2542                         from textwrap import wrap
2543                         waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
2544                                 for line in wrap(waiting_msg, 65))
2545                         if not self.background:
2546                                 writemsg(waiting_msg, noiselevel=-1)
2547
2548                         self._current_task = prefetcher
2549                         prefetcher.addExitListener(self._prefetch_exit)
2550                         return
2551
2552                 self._prefetch_exit(prefetcher)
2553
2554         def _prefetch_exit(self, prefetcher):
2555
2556                 opts = self.opts
2557                 pkg = self.pkg
2558                 settings = self.settings
2559
2560                 if opts.fetchonly:
2561                                 fetcher = EbuildFetchonly(
2562                                         fetch_all=opts.fetch_all_uri,
2563                                         pkg=pkg, pretend=opts.pretend,
2564                                         settings=settings)
2565                                 retval = fetcher.execute()
2566                                 self.returncode = retval
2567                                 self.wait()
2568                                 return
2569
2570                 fetcher = EbuildFetcher(config_pool=self.config_pool,
2571                         fetchall=opts.fetch_all_uri,
2572                         fetchonly=opts.fetchonly,
2573                         background=self.background,
2574                         pkg=pkg, scheduler=self.scheduler)
2575
2576                 self._start_task(fetcher, self._fetch_exit)
2577
2578         def _fetch_exit(self, fetcher):
2579                 opts = self.opts
2580                 pkg = self.pkg
2581
2582                 fetch_failed = False
2583                 if opts.fetchonly:
2584                         fetch_failed = self._final_exit(fetcher) != os.EX_OK
2585                 else:
2586                         fetch_failed = self._default_exit(fetcher) != os.EX_OK
2587
2588                 if fetch_failed and fetcher.logfile is not None and \
2589                         os.path.exists(fetcher.logfile):
2590                         self.settings["PORTAGE_LOG_FILE"] = fetcher.logfile
2591
2592                 if not fetch_failed and fetcher.logfile is not None:
2593                         # Fetch was successful, so remove the fetch log.
2594                         try:
2595                                 os.unlink(fetcher.logfile)
2596                         except OSError:
2597                                 pass
2598
2599                 if fetch_failed or opts.fetchonly:
2600                         self.wait()
2601                         return
2602
2603                 logger = self.logger
2604                 opts = self.opts
2605                 pkg_count = self.pkg_count
2606                 scheduler = self.scheduler
2607                 settings = self.settings
2608                 features = settings.features
2609                 ebuild_path = self._ebuild_path
2610                 system_set = pkg.root_config.sets["system"]
2611
2612                 self._build_dir = EbuildBuildDir(pkg=pkg, settings=settings)
2613                 self._build_dir.lock()
2614
2615                 # Cleaning is triggered before the setup
2616                 # phase, in portage.doebuild().
2617                 msg = " === (%s of %s) Cleaning (%s::%s)" % \
2618                         (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2619                 short_msg = "emerge: (%s of %s) %s Clean" % \
2620                         (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2621                 logger.log(msg, short_msg=short_msg)
2622
2623                 #buildsyspkg: Check if we need to _force_ binary package creation
2624                 self._issyspkg = "buildsyspkg" in features and \
2625                                 system_set.findAtomForPackage(pkg) and \
2626                                 not opts.buildpkg
2627
2628                 if opts.buildpkg or self._issyspkg:
2629
2630                         self._buildpkg = True
2631
2632                         msg = " === (%s of %s) Compiling/Packaging (%s::%s)" % \
2633                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2634                         short_msg = "emerge: (%s of %s) %s Compile" % \
2635                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2636                         logger.log(msg, short_msg=short_msg)
2637
2638                 else:
2639                         msg = " === (%s of %s) Compiling/Merging (%s::%s)" % \
2640                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, ebuild_path)
2641                         short_msg = "emerge: (%s of %s) %s Compile" % \
2642                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2643                         logger.log(msg, short_msg=short_msg)
2644
2645                 build = EbuildExecuter(background=self.background, pkg=pkg,
2646                         scheduler=scheduler, settings=settings)
2647                 self._start_task(build, self._build_exit)
2648
2649         def _unlock_builddir(self):
2650                 portage.elog.elog_process(self.pkg.cpv, self.settings)
2651                 self._build_dir.unlock()
2652
2653         def _build_exit(self, build):
2654                 if self._default_exit(build) != os.EX_OK:
2655                         self._unlock_builddir()
2656                         self.wait()
2657                         return
2658
2659                 opts = self.opts
2660                 buildpkg = self._buildpkg
2661
2662                 if not buildpkg:
2663                         self._final_exit(build)
2664                         self.wait()
2665                         return
2666
2667                 if self._issyspkg:
2668                         msg = ">>> This is a system package, " + \
2669                                 "let's pack a rescue tarball.\n"
2670
2671                         log_path = self.settings.get("PORTAGE_LOG_FILE")
2672                         if log_path is not None:
2673                                 log_file = open(log_path, 'a')
2674                                 try:
2675                                         log_file.write(msg)
2676                                 finally:
2677                                         log_file.close()
2678
2679                         if not self.background:
2680                                 portage.writemsg_stdout(msg, noiselevel=-1)
2681
2682                 packager = EbuildBinpkg(background=self.background, pkg=self.pkg,
2683                         scheduler=self.scheduler, settings=self.settings)
2684
2685                 self._start_task(packager, self._buildpkg_exit)
2686
2687         def _buildpkg_exit(self, packager):
2688                 """
2689                 Released build dir lock when there is a failure or
2690                 when in buildpkgonly mode. Otherwise, the lock will
2691                 be released when merge() is called.
2692                 """
2693
2694                 if self._default_exit(packager) != os.EX_OK:
2695                         self._unlock_builddir()
2696                         self.wait()
2697                         return
2698
2699                 if self.opts.buildpkgonly:
2700                         # Need to call "clean" phase for buildpkgonly mode
2701                         portage.elog.elog_process(self.pkg.cpv, self.settings)
2702                         phase = "clean"
2703                         clean_phase = EbuildPhase(background=self.background,
2704                                 pkg=self.pkg, phase=phase,
2705                                 scheduler=self.scheduler, settings=self.settings,
2706                                 tree=self._tree)
2707                         self._start_task(clean_phase, self._clean_exit)
2708                         return
2709
2710                 # Continue holding the builddir lock until
2711                 # after the package has been installed.
2712                 self._current_task = None
2713                 self.returncode = packager.returncode
2714                 self.wait()
2715
2716         def _clean_exit(self, clean_phase):
2717                 if self._final_exit(clean_phase) != os.EX_OK or \
2718                         self.opts.buildpkgonly:
2719                         self._unlock_builddir()
2720                 self.wait()
2721
2722         def install(self):
2723                 """
2724                 Install the package and then clean up and release locks.
2725                 Only call this after the build has completed successfully
2726                 and neither fetchonly nor buildpkgonly mode are enabled.
2727                 """
2728
2729                 find_blockers = self.find_blockers
2730                 ldpath_mtimes = self.ldpath_mtimes
2731                 logger = self.logger
2732                 pkg = self.pkg
2733                 pkg_count = self.pkg_count
2734                 settings = self.settings
2735                 world_atom = self.world_atom
2736                 ebuild_path = self._ebuild_path
2737                 tree = self._tree
2738
2739                 merge = EbuildMerge(find_blockers=self.find_blockers,
2740                         ldpath_mtimes=ldpath_mtimes, logger=logger, pkg=pkg,
2741                         pkg_count=pkg_count, pkg_path=ebuild_path,
2742                         scheduler=self.scheduler,
2743                         settings=settings, tree=tree, world_atom=world_atom)
2744
2745                 msg = " === (%s of %s) Merging (%s::%s)" % \
2746                         (pkg_count.curval, pkg_count.maxval,
2747                         pkg.cpv, ebuild_path)
2748                 short_msg = "emerge: (%s of %s) %s Merge" % \
2749                         (pkg_count.curval, pkg_count.maxval, pkg.cpv)
2750                 logger.log(msg, short_msg=short_msg)
2751
2752                 try:
2753                         rval = merge.execute()
2754                 finally:
2755                         self._unlock_builddir()
2756
2757                 return rval
2758
2759 class EbuildExecuter(CompositeTask):
2760
2761         __slots__ = ("pkg", "scheduler", "settings") + ("_tree",)
2762
2763         _phases = ("prepare", "configure", "compile", "test", "install")
2764
2765         _live_eclasses = frozenset([
2766                 "bzr",
2767                 "cvs",
2768                 "darcs",
2769                 "git",
2770                 "mercurial",
2771                 "subversion"
2772         ])
2773
2774         def _start(self):
2775                 self._tree = "porttree"
2776                 pkg = self.pkg
2777                 phase = "clean"
2778                 clean_phase = EbuildPhase(background=self.background, pkg=pkg, phase=phase,
2779                         scheduler=self.scheduler, settings=self.settings, tree=self._tree)
2780                 self._start_task(clean_phase, self._clean_phase_exit)
2781
2782         def _clean_phase_exit(self, clean_phase):
2783
2784                 if self._default_exit(clean_phase) != os.EX_OK:
2785                         self.wait()
2786                         return
2787
2788                 pkg = self.pkg
2789                 scheduler = self.scheduler
2790                 settings = self.settings
2791                 cleanup = 1
2792
2793                 # This initializes PORTAGE_LOG_FILE.
2794                 portage.prepare_build_dirs(pkg.root, settings, cleanup)
2795
2796                 setup_phase = EbuildPhase(background=self.background,
2797                         pkg=pkg, phase="setup", scheduler=scheduler,
2798                         settings=settings, tree=self._tree)
2799
2800                 setup_phase.addExitListener(self._setup_exit)
2801                 self._current_task = setup_phase
2802                 self.scheduler.scheduleSetup(setup_phase)
2803
2804         def _setup_exit(self, setup_phase):
2805
2806                 if self._default_exit(setup_phase) != os.EX_OK:
2807                         self.wait()
2808                         return
2809
2810                 unpack_phase = EbuildPhase(background=self.background,
2811                         pkg=self.pkg, phase="unpack", scheduler=self.scheduler,
2812                         settings=self.settings, tree=self._tree)
2813
2814                 if self._live_eclasses.intersection(self.pkg.inherited):
2815                         # Serialize $DISTDIR access for live ebuilds since
2816                         # otherwise they can interfere with eachother.
2817
2818                         unpack_phase.addExitListener(self._unpack_exit)
2819                         self._current_task = unpack_phase
2820                         self.scheduler.scheduleUnpack(unpack_phase)
2821
2822                 else:
2823                         self._start_task(unpack_phase, self._unpack_exit)
2824
2825         def _unpack_exit(self, unpack_phase):
2826
2827                 if self._default_exit(unpack_phase) != os.EX_OK:
2828                         self.wait()
2829                         return
2830
2831                 ebuild_phases = TaskSequence(scheduler=self.scheduler)
2832
2833                 pkg = self.pkg
2834                 phases = self._phases
2835                 eapi = pkg.metadata["EAPI"]
2836                 if eapi in ("0", "1", "2_pre1"):
2837                         # skip src_prepare and src_configure
2838                         phases = phases[2:]
2839                 elif eapi in ("2_pre2",):
2840                         # skip src_prepare
2841                         phases = phases[1:]
2842
2843                 for phase in phases:
2844                         ebuild_phases.add(EbuildPhase(background=self.background,
2845                                 pkg=self.pkg, phase=phase, scheduler=self.scheduler,
2846                                 settings=self.settings, tree=self._tree))
2847
2848                 self._start_task(ebuild_phases, self._default_final_exit)
2849
2850 class EbuildMetadataPhase(SubProcess):
2851
2852         """
2853         Asynchronous interface for the ebuild "depend" phase which is
2854         used to extract metadata from the ebuild.
2855         """
2856
2857         __slots__ = ("cpv", "ebuild_path", "fd_pipes", "metadata_callback",
2858                 "ebuild_mtime", "portdb", "repo_path", "settings") + \
2859                 ("_raw_metadata",)
2860
2861         _file_names = ("ebuild",)
2862         _files_dict = slot_dict_class(_file_names, prefix="")
2863         _metadata_fd = 9
2864
2865         def _start(self):
2866                 settings = self.settings
2867                 settings.reset()
2868                 ebuild_path = self.ebuild_path
2869                 debug = settings.get("PORTAGE_DEBUG") == "1"
2870                 master_fd = None
2871                 slave_fd = None
2872                 fd_pipes = None
2873                 if self.fd_pipes is not None:
2874                         fd_pipes = self.fd_pipes.copy()
2875                 else:
2876                         fd_pipes = {}
2877
2878                 fd_pipes.setdefault(0, sys.stdin.fileno())
2879                 fd_pipes.setdefault(1, sys.stdout.fileno())
2880                 fd_pipes.setdefault(2, sys.stderr.fileno())
2881
2882                 # flush any pending output
2883                 for fd in fd_pipes.itervalues():
2884                         if fd == sys.stdout.fileno():
2885                                 sys.stdout.flush()
2886                         if fd == sys.stderr.fileno():
2887                                 sys.stderr.flush()
2888
2889                 fd_pipes_orig = fd_pipes.copy()
2890                 self._files = self._files_dict()
2891                 files = self._files
2892
2893                 master_fd, slave_fd = os.pipe()
2894                 fcntl.fcntl(master_fd, fcntl.F_SETFL,
2895                         fcntl.fcntl(master_fd, fcntl.F_GETFL) | os.O_NONBLOCK)
2896
2897                 fd_pipes[self._metadata_fd] = slave_fd
2898
2899                 self._raw_metadata = []
2900                 files.ebuild = os.fdopen(master_fd, 'r')
2901                 self._reg_id = self.scheduler.register(files.ebuild.fileno(),
2902                         self._registered_events, self._output_handler)
2903                 self._registered = True
2904
2905                 retval = portage.doebuild(ebuild_path, "depend",
2906                         settings["ROOT"], settings, debug,
2907                         mydbapi=self.portdb, tree="porttree",
2908                         fd_pipes=fd_pipes, returnpid=True)
2909
2910                 os.close(slave_fd)
2911
2912                 if isinstance(retval, int):
2913                         # doebuild failed before spawning
2914                         self._unregister()
2915                         self.returncode = retval
2916                         self.wait()
2917                         return
2918
2919                 self.pid = retval[0]
2920                 portage.process.spawned_pids.remove(self.pid)
2921
2922         def _output_handler(self, fd, event):
2923
2924                 if event & PollConstants.POLLIN:
2925                         self._raw_metadata.append(self._files.ebuild.read())
2926                         if not self._raw_metadata[-1]:
2927                                 self._unregister()
2928                                 self.wait()
2929
2930                 self._unregister_if_appropriate(event)
2931                 return self._registered
2932
2933         def _set_returncode(self, wait_retval):
2934                 SubProcess._set_returncode(self, wait_retval)
2935                 if self.returncode == os.EX_OK:
2936                         metadata_lines = "".join(self._raw_metadata).splitlines()
2937                         if len(portage.auxdbkeys) != len(metadata_lines):
2938                                 # Don't trust bash's returncode if the
2939                                 # number of lines is incorrect.
2940                                 self.returncode = 1
2941                         else:
2942                                 metadata = izip(portage.auxdbkeys, metadata_lines)
2943                                 self.metadata_callback(self.cpv, self.ebuild_path,
2944                                         self.repo_path, metadata, self.ebuild_mtime)
2945
2946 class EbuildProcess(SpawnProcess):
2947
2948         __slots__ = ("phase", "pkg", "settings", "tree")
2949
2950         def _start(self):
2951                 # Don't open the log file during the clean phase since the
2952                 # open file can result in an nfs lock on $T/build.log which
2953                 # prevents the clean phase from removing $T.
2954                 if self.phase not in ("clean", "cleanrm"):
2955                         self.logfile = self.settings.get("PORTAGE_LOG_FILE")
2956                 SpawnProcess._start(self)
2957
2958         def _pipe(self, fd_pipes):
2959                 stdout_pipe = fd_pipes.get(1)
2960                 got_pty, master_fd, slave_fd = \
2961                         portage._create_pty_or_pipe(copy_term_size=stdout_pipe)
2962                 return (master_fd, slave_fd)
2963
2964         def _spawn(self, args, **kwargs):
2965
2966                 root_config = self.pkg.root_config
2967                 tree = self.tree
2968                 mydbapi = root_config.trees[tree].dbapi
2969                 settings = self.settings
2970                 ebuild_path = settings["EBUILD"]
2971                 debug = settings.get("PORTAGE_DEBUG") == "1"
2972
2973                 rval = portage.doebuild(ebuild_path, self.phase,
2974                         root_config.root, settings, debug,
2975                         mydbapi=mydbapi, tree=tree, **kwargs)
2976
2977                 return rval
2978
2979         def _set_returncode(self, wait_retval):
2980                 SpawnProcess._set_returncode(self, wait_retval)
2981
2982                 if self.phase not in ("clean", "cleanrm"):
2983                         self.returncode = portage._doebuild_exit_status_check_and_log(
2984                                 self.settings, self.phase, self.returncode)
2985
2986                 if self.phase == "test" and self.returncode != os.EX_OK and \
2987                         "test-fail-continue" in self.settings.features:
2988                         self.returncode = os.EX_OK
2989
2990                 portage._post_phase_userpriv_perms(self.settings)
2991
2992 class EbuildPhase(CompositeTask):
2993
2994         __slots__ = ("background", "pkg", "phase",
2995                 "scheduler", "settings", "tree")
2996
2997         _post_phase_cmds = portage._post_phase_cmds
2998
2999         def _start(self):
3000
3001                 ebuild_process = EbuildProcess(background=self.background,
3002                         pkg=self.pkg, phase=self.phase, scheduler=self.scheduler,
3003                         settings=self.settings, tree=self.tree)
3004
3005                 self._start_task(ebuild_process, self._ebuild_exit)
3006
3007         def _ebuild_exit(self, ebuild_process):
3008
3009                 if self.phase == "install":
3010                         out = None
3011                         log_path = self.settings.get("PORTAGE_LOG_FILE")
3012                         log_file = None
3013                         if self.background and log_path is not None:
3014                                 log_file = open(log_path, 'a')
3015                                 out = log_file
3016                         try:
3017                                 portage._check_build_log(self.settings, out=out)
3018                         finally:
3019                                 if log_file is not None:
3020                                         log_file.close()
3021
3022                 if self._default_exit(ebuild_process) != os.EX_OK:
3023                         self.wait()
3024                         return
3025
3026                 settings = self.settings
3027
3028                 if self.phase == "install":
3029                         portage._post_src_install_uid_fix(settings)
3030
3031                 post_phase_cmds = self._post_phase_cmds.get(self.phase)
3032                 if post_phase_cmds is not None:
3033                         post_phase = MiscFunctionsProcess(background=self.background,
3034                                 commands=post_phase_cmds, phase=self.phase, pkg=self.pkg,
3035                                 scheduler=self.scheduler, settings=settings)
3036                         self._start_task(post_phase, self._post_phase_exit)
3037                         return
3038
3039                 self.returncode = ebuild_process.returncode
3040                 self._current_task = None
3041                 self.wait()
3042
3043         def _post_phase_exit(self, post_phase):
3044                 if self._final_exit(post_phase) != os.EX_OK:
3045                         writemsg("!!! post %s failed; exiting.\n" % self.phase,
3046                                 noiselevel=-1)
3047                 self._current_task = None
3048                 self.wait()
3049                 return
3050
3051 class EbuildBinpkg(EbuildProcess):
3052         """
3053         This assumes that src_install() has successfully completed.
3054         """
3055         __slots__ = ("_binpkg_tmpfile",)
3056
3057         def _start(self):
3058                 self.phase = "package"
3059                 self.tree = "porttree"
3060                 pkg = self.pkg
3061                 root_config = pkg.root_config
3062                 portdb = root_config.trees["porttree"].dbapi
3063                 bintree = root_config.trees["bintree"]
3064                 ebuild_path = portdb.findname(self.pkg.cpv)
3065                 settings = self.settings
3066                 debug = settings.get("PORTAGE_DEBUG") == "1"
3067
3068                 bintree.prevent_collision(pkg.cpv)
3069                 binpkg_tmpfile = os.path.join(bintree.pkgdir,
3070                         pkg.cpv + ".tbz2." + str(os.getpid()))
3071                 self._binpkg_tmpfile = binpkg_tmpfile
3072                 settings["PORTAGE_BINPKG_TMPFILE"] = binpkg_tmpfile
3073                 settings.backup_changes("PORTAGE_BINPKG_TMPFILE")
3074
3075                 try:
3076                         EbuildProcess._start(self)
3077                 finally:
3078                         settings.pop("PORTAGE_BINPKG_TMPFILE", None)
3079
3080         def _set_returncode(self, wait_retval):
3081                 EbuildProcess._set_returncode(self, wait_retval)
3082
3083                 pkg = self.pkg
3084                 bintree = pkg.root_config.trees["bintree"]
3085                 binpkg_tmpfile = self._binpkg_tmpfile
3086                 if self.returncode == os.EX_OK:
3087                         bintree.inject(pkg.cpv, filename=binpkg_tmpfile)
3088
3089 class EbuildMerge(SlotObject):
3090
3091         __slots__ = ("find_blockers", "logger", "ldpath_mtimes",
3092                 "pkg", "pkg_count", "pkg_path", "pretend",
3093                 "scheduler", "settings", "tree", "world_atom")
3094
3095         def execute(self):
3096                 root_config = self.pkg.root_config
3097                 settings = self.settings
3098                 retval = portage.merge(settings["CATEGORY"],
3099                         settings["PF"], settings["D"],
3100                         os.path.join(settings["PORTAGE_BUILDDIR"],
3101                         "build-info"), root_config.root, settings,
3102                         myebuild=settings["EBUILD"],
3103                         mytree=self.tree, mydbapi=root_config.trees[self.tree].dbapi,
3104                         vartree=root_config.trees["vartree"],
3105                         prev_mtimes=self.ldpath_mtimes,
3106                         scheduler=self.scheduler,
3107                         blockers=self.find_blockers)
3108
3109                 if retval == os.EX_OK:
3110                         self.world_atom(self.pkg)
3111                         self._log_success()
3112
3113                 return retval
3114
3115         def _log_success(self):
3116                 pkg = self.pkg
3117                 pkg_count = self.pkg_count
3118                 pkg_path = self.pkg_path
3119                 logger = self.logger
3120                 if "noclean" not in self.settings.features:
3121                         short_msg = "emerge: (%s of %s) %s Clean Post" % \
3122                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3123                         logger.log((" === (%s of %s) " + \
3124                                 "Post-Build Cleaning (%s::%s)") % \
3125                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path),
3126                                 short_msg=short_msg)
3127                 logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
3128                         (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
3129
3130 class PackageUninstall(AsynchronousTask):
3131
3132         __slots__ = ("ldpath_mtimes", "opts", "pkg", "scheduler", "settings")
3133
3134         def _start(self):
3135                 try:
3136                         unmerge(self.pkg.root_config, self.opts, "unmerge",
3137                                 [self.pkg.cpv], self.ldpath_mtimes, clean_world=0,
3138                                 clean_delay=0, raise_on_error=1, scheduler=self.scheduler,
3139                                 writemsg_level=self._writemsg_level)
3140                 except UninstallFailure, e:
3141                         self.returncode = e.status
3142                 else:
3143                         self.returncode = os.EX_OK
3144                 self.wait()
3145
3146         def _writemsg_level(self, msg, level=0, noiselevel=0):
3147
3148                 log_path = self.settings.get("PORTAGE_LOG_FILE")
3149                 background = self.background
3150
3151                 if log_path is None:
3152                         if not (background and level < logging.WARNING):
3153                                 portage.util.writemsg_level(msg,
3154                                         level=level, noiselevel=noiselevel)
3155                 else:
3156                         if not background:
3157                                 portage.util.writemsg_level(msg,
3158                                         level=level, noiselevel=noiselevel)
3159
3160                         f = open(log_path, 'a')
3161                         try:
3162                                 f.write(msg)
3163                         finally:
3164                                 f.close()
3165
3166 class Binpkg(CompositeTask):
3167
3168         __slots__ = ("find_blockers",
3169                 "ldpath_mtimes", "logger", "opts",
3170                 "pkg", "pkg_count", "prefetcher", "settings", "world_atom") + \
3171                 ("_bintree", "_build_dir", "_ebuild_path", "_fetched_pkg",
3172                 "_image_dir", "_infloc", "_pkg_path", "_tree", "_verify")
3173
3174         def _writemsg_level(self, msg, level=0, noiselevel=0):
3175
3176                 if not self.background:
3177                         portage.util.writemsg_level(msg,
3178                                 level=level, noiselevel=noiselevel)
3179
3180                 log_path = self.settings.get("PORTAGE_LOG_FILE")
3181                 if  log_path is not None:
3182                         f = open(log_path, 'a')
3183                         try:
3184                                 f.write(msg)
3185                         finally:
3186                                 f.close()
3187
3188         def _start(self):
3189
3190                 pkg = self.pkg
3191                 settings = self.settings
3192                 settings.setcpv(pkg)
3193                 self._tree = "bintree"
3194                 self._bintree = self.pkg.root_config.trees[self._tree]
3195                 self._verify = not self.opts.pretend
3196
3197                 dir_path = os.path.join(settings["PORTAGE_TMPDIR"],
3198                         "portage", pkg.category, pkg.pf)
3199                 self._build_dir = EbuildBuildDir(dir_path=dir_path,
3200                         pkg=pkg, settings=settings)
3201                 self._image_dir = os.path.join(dir_path, "image")
3202                 self._infloc = os.path.join(dir_path, "build-info")
3203                 self._ebuild_path = os.path.join(self._infloc, pkg.pf + ".ebuild")
3204                 settings["EBUILD"] = self._ebuild_path
3205                 debug = settings.get("PORTAGE_DEBUG") == "1"
3206                 portage.doebuild_environment(self._ebuild_path, "setup",
3207                         settings["ROOT"], settings, debug, 1, self._bintree.dbapi)
3208                 settings.configdict["pkg"]["EMERGE_FROM"] = pkg.type_name
3209
3210                 # The prefetcher has already completed or it
3211                 # could be running now. If it's running now,
3212                 # wait for it to complete since it holds
3213                 # a lock on the file being fetched. The
3214                 # portage.locks functions are only designed
3215                 # to work between separate processes. Since
3216                 # the lock is held by the current process,
3217                 # use the scheduler and fetcher methods to
3218                 # synchronize with the fetcher.
3219                 prefetcher = self.prefetcher
3220                 if prefetcher is None:
3221                         pass
3222                 elif not prefetcher.isAlive():
3223                         prefetcher.cancel()
3224                 elif prefetcher.poll() is None:
3225
3226                         waiting_msg = ("Fetching '%s' " + \
3227                                 "in the background. " + \
3228                                 "To view fetch progress, run `tail -f " + \
3229                                 "/var/log/emerge-fetch.log` in another " + \
3230                                 "terminal.") % prefetcher.pkg_path
3231                         msg_prefix = colorize("GOOD", " * ")
3232                         from textwrap import wrap
3233                         waiting_msg = "".join("%s%s\n" % (msg_prefix, line) \
3234                                 for line in wrap(waiting_msg, 65))
3235                         if not self.background:
3236                                 writemsg(waiting_msg, noiselevel=-1)
3237
3238                         self._current_task = prefetcher
3239                         prefetcher.addExitListener(self._prefetch_exit)
3240                         return
3241
3242                 self._prefetch_exit(prefetcher)
3243
3244         def _prefetch_exit(self, prefetcher):
3245
3246                 pkg = self.pkg
3247                 pkg_count = self.pkg_count
3248                 if not (self.opts.pretend or self.opts.fetchonly):
3249                         self._build_dir.lock()
3250                         try:
3251                                 shutil.rmtree(self._build_dir.dir_path)
3252                         except EnvironmentError, e:
3253                                 if e.errno != errno.ENOENT:
3254                                         raise
3255                                 del e
3256                         portage.prepare_build_dirs(self.settings["ROOT"], self.settings, 1)
3257                 fetcher = BinpkgFetcher(background=self.background,
3258                         logfile=self.settings.get("PORTAGE_LOG_FILE"), pkg=self.pkg,
3259                         pretend=self.opts.pretend, scheduler=self.scheduler)
3260                 pkg_path = fetcher.pkg_path
3261                 self._pkg_path = pkg_path
3262
3263                 if self.opts.getbinpkg and self._bintree.isremote(pkg.cpv):
3264
3265                         msg = " --- (%s of %s) Fetching Binary (%s::%s)" %\
3266                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
3267                         short_msg = "emerge: (%s of %s) %s Fetch" % \
3268                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3269                         self.logger.log(msg, short_msg=short_msg)
3270                         self._start_task(fetcher, self._fetcher_exit)
3271                         return
3272
3273                 self._fetcher_exit(fetcher)
3274
3275         def _fetcher_exit(self, fetcher):
3276
3277                 # The fetcher only has a returncode when
3278                 # --getbinpkg is enabled.
3279                 if fetcher.returncode is not None:
3280                         self._fetched_pkg = True
3281                         if self._default_exit(fetcher) != os.EX_OK:
3282                                 self._unlock_builddir()
3283                                 self.wait()
3284                                 return
3285
3286                 if self.opts.pretend:
3287                         self._current_task = None
3288                         self.returncode = os.EX_OK
3289                         self.wait()
3290                         return
3291
3292                 verifier = None
3293                 if self._verify:
3294                         logfile = None
3295                         if self.background:
3296                                 logfile = self.settings.get("PORTAGE_LOG_FILE")
3297                         verifier = BinpkgVerifier(background=self.background,
3298                                 logfile=logfile, pkg=self.pkg)
3299                         self._start_task(verifier, self._verifier_exit)
3300                         return
3301
3302                 self._verifier_exit(verifier)
3303
3304         def _verifier_exit(self, verifier):
3305                 if verifier is not None and \
3306                         self._default_exit(verifier) != os.EX_OK:
3307                         self._unlock_builddir()
3308                         self.wait()
3309                         return
3310
3311                 logger = self.logger
3312                 pkg = self.pkg
3313                 pkg_count = self.pkg_count
3314                 pkg_path = self._pkg_path
3315
3316                 if self._fetched_pkg:
3317                         self._bintree.inject(pkg.cpv, filename=pkg_path)
3318
3319                 if self.opts.fetchonly:
3320                         self._current_task = None
3321                         self.returncode = os.EX_OK
3322                         self.wait()
3323                         return
3324
3325                 msg = " === (%s of %s) Merging Binary (%s::%s)" % \
3326                         (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg_path)
3327                 short_msg = "emerge: (%s of %s) %s Merge Binary" % \
3328                         (pkg_count.curval, pkg_count.maxval, pkg.cpv)
3329                 logger.log(msg, short_msg=short_msg)
3330
3331                 phase = "clean"
3332                 settings = self.settings
3333                 ebuild_phase = EbuildPhase(background=self.background,
3334                         pkg=pkg, phase=phase, scheduler=self.scheduler,
3335                         settings=settings, tree=self._tree)
3336
3337                 self._start_task(ebuild_phase, self._clean_exit)
3338
3339         def _clean_exit(self, clean_phase):
3340                 if self._default_exit(clean_phase) != os.EX_OK:
3341                         self._unlock_builddir()
3342                         self.wait()
3343                         return
3344
3345                 dir_path = self._build_dir.dir_path
3346
3347                 try:
3348                         shutil.rmtree(dir_path)
3349                 except (IOError, OSError), e:
3350                         if e.errno != errno.ENOENT:
3351                                 raise
3352                         del e
3353
3354                 infloc = self._infloc
3355                 pkg = self.pkg
3356                 pkg_path = self._pkg_path
3357
3358                 dir_mode = 0755
3359                 for mydir in (dir_path, self._image_dir, infloc):
3360                         portage.util.ensure_dirs(mydir, uid=portage.data.portage_uid,
3361                                 gid=portage.data.portage_gid, mode=dir_mode)
3362
3363                 # This initializes PORTAGE_LOG_FILE.
3364                 portage.prepare_build_dirs(self.settings["ROOT"], self.settings, 1)
3365                 self._writemsg_level(">>> Extracting info\n")
3366
3367                 pkg_xpak = portage.xpak.tbz2(self._pkg_path)
3368                 check_missing_metadata = ("CATEGORY", "PF")
3369                 missing_metadata = set()
3370                 for k in check_missing_metadata:
3371                         v = pkg_xpak.getfile(k)
3372                         if not v:
3373                                 missing_metadata.add(k)
3374
3375                 pkg_xpak.unpackinfo(infloc)
3376                 for k in missing_metadata:
3377                         if k == "CATEGORY":
3378                                 v = pkg.category
3379                         elif k == "PF":
3380                                 v = pkg.pf
3381                         else:
3382                                 continue
3383
3384                         f = open(os.path.join(infloc, k), 'wb')
3385                         try:
3386                                 f.write(v + "\n")
3387                         finally:
3388                                 f.close()
3389
3390                 # Store the md5sum in the vdb.
3391                 f = open(os.path.join(infloc, "BINPKGMD5"), "w")
3392                 try:
3393                         f.write(str(portage.checksum.perform_md5(pkg_path)) + "\n")
3394                 finally:
3395                         f.close()
3396
3397                 # This gives bashrc users an opportunity to do various things
3398                 # such as remove binary packages after they're installed.
3399                 settings = self.settings
3400                 settings.setcpv(self.pkg)
3401                 settings["PORTAGE_BINPKG_FILE"] = pkg_path
3402                 settings.backup_changes("PORTAGE_BINPKG_FILE")
3403
3404                 phase = "setup"
3405                 setup_phase = EbuildPhase(background=self.background,
3406                         pkg=self.pkg, phase=phase, scheduler=self.scheduler,
3407                         settings=settings, tree=self._tree)
3408
3409                 setup_phase.addExitListener(self._setup_exit)
3410                 self._current_task = setup_phase
3411                 self.scheduler.scheduleSetup(setup_phase)
3412
3413         def _setup_exit(self, setup_phase):
3414                 if self._default_exit(setup_phase) != os.EX_OK:
3415                         self._unlock_builddir()
3416                         self.wait()
3417                         return
3418
3419                 extractor = BinpkgExtractorAsync(background=self.background,
3420                         image_dir=self._image_dir,
3421                         pkg=self.pkg, pkg_path=self._pkg_path, scheduler=self.scheduler)
3422                 self._writemsg_level(">>> Extracting %s\n" % self.pkg.cpv)
3423                 self._start_task(extractor, self._extractor_exit)
3424
3425         def _extractor_exit(self, extractor):
3426                 if self._final_exit(extractor) != os.EX_OK:
3427                         self._unlock_builddir()
3428                         writemsg("!!! Error Extracting '%s'\n" % self._pkg_path,
3429                                 noiselevel=-1)
3430                 self.wait()
3431
3432         def _unlock_builddir(self):
3433                 if self.opts.pretend or self.opts.fetchonly:
3434                         return
3435                 portage.elog.elog_process(self.pkg.cpv, self.settings)
3436                 self._build_dir.unlock()
3437
3438         def install(self):
3439
3440                 # This gives bashrc users an opportunity to do various things
3441                 # such as remove binary packages after they're installed.
3442                 settings = self.settings
3443                 settings["PORTAGE_BINPKG_FILE"] = self._pkg_path
3444                 settings.backup_changes("PORTAGE_BINPKG_FILE")
3445
3446                 merge = EbuildMerge(find_blockers=self.find_blockers,
3447                         ldpath_mtimes=self.ldpath_mtimes, logger=self.logger,
3448                         pkg=self.pkg, pkg_count=self.pkg_count,
3449                         pkg_path=self._pkg_path, scheduler=self.scheduler,
3450                         settings=settings, tree=self._tree, world_atom=self.world_atom)
3451
3452                 try:
3453                         retval = merge.execute()
3454                 finally:
3455                         settings.pop("PORTAGE_BINPKG_FILE", None)
3456                         self._unlock_builddir()
3457                 return retval
3458
3459 class BinpkgFetcher(SpawnProcess):
3460
3461         __slots__ = ("pkg", "pretend",
3462                 "locked", "pkg_path", "_lock_obj")
3463
3464         def __init__(self, **kwargs):
3465                 SpawnProcess.__init__(self, **kwargs)
3466                 pkg = self.pkg
3467                 self.pkg_path = pkg.root_config.trees["bintree"].getname(pkg.cpv)
3468
3469         def _start(self):
3470
3471                 if self.cancelled:
3472                         return
3473
3474                 pkg = self.pkg
3475                 pretend = self.pretend
3476                 bintree = pkg.root_config.trees["bintree"]
3477                 settings = bintree.settings
3478                 use_locks = "distlocks" in settings.features
3479                 pkg_path = self.pkg_path
3480
3481                 if not pretend:
3482                         portage.util.ensure_dirs(os.path.dirname(pkg_path))
3483                         if use_locks:
3484                                 self.lock()
3485                 exists = os.path.exists(pkg_path)
3486                 resume = exists and os.path.basename(pkg_path) in bintree.invalids
3487                 if not (pretend or resume):
3488                         # Remove existing file or broken symlink.
3489                         try:
3490                                 os.unlink(pkg_path)
3491                         except OSError:
3492                                 pass
3493
3494                 # urljoin doesn't work correctly with
3495                 # unrecognized protocols like sftp
3496                 if bintree._remote_has_index:
3497                         rel_uri = bintree._remotepkgs[pkg.cpv].get("PATH")
3498                         if not rel_uri:
3499                                 rel_uri = pkg.cpv + ".tbz2"
3500                         uri = bintree._remote_base_uri.rstrip("/") + \
3501                                 "/" + rel_uri.lstrip("/")
3502                 else:
3503                         uri = settings["PORTAGE_BINHOST"].rstrip("/") + \
3504                                 "/" + pkg.pf + ".tbz2"
3505
3506                 if pretend:
3507                         portage.writemsg_stdout("\n%s\n" % uri, noiselevel=-1)
3508                         self.returncode = os.EX_OK
3509                         self.wait()
3510                         return
3511
3512                 protocol = urlparse.urlparse(uri)[0]
3513                 fcmd_prefix = "FETCHCOMMAND"
3514                 if resume:
3515                         fcmd_prefix = "RESUMECOMMAND"
3516                 fcmd = settings.get(fcmd_prefix + "_" + protocol.upper())
3517                 if not fcmd:
3518                         fcmd = settings.get(fcmd_prefix)
3519
3520                 fcmd_vars = {
3521                         "DISTDIR" : os.path.dirname(pkg_path),
3522                         "URI"     : uri,
3523                         "FILE"    : os.path.basename(pkg_path)
3524                 }
3525
3526                 fetch_env = dict(settings.iteritems())
3527                 fetch_args = [portage.util.varexpand(x, mydict=fcmd_vars) \
3528                         for x in shlex.split(fcmd)]
3529
3530                 if self.fd_pipes is None:
3531                         self.fd_pipes = {}
3532                 fd_pipes = self.fd_pipes
3533
3534                 # Redirect all output to stdout since some fetchers like
3535                 # wget pollute stderr (if portage detects a problem then it
3536                 # can send it's own message to stderr).
3537                 fd_pipes.setdefault(0, sys.stdin.fileno())
3538                 fd_pipes.setdefault(1, sys.stdout.fileno())
3539                 fd_pipes.setdefault(2, sys.stdout.fileno())
3540
3541                 self.args = fetch_args
3542                 self.env = fetch_env
3543                 SpawnProcess._start(self)
3544
3545         def _set_returncode(self, wait_retval):
3546                 SpawnProcess._set_returncode(self, wait_retval)
3547                 if self.returncode == os.EX_OK:
3548                         # If possible, update the mtime to match the remote package if
3549                         # the fetcher didn't already do it automatically.
3550                         bintree = self.pkg.root_config.trees["bintree"]
3551                         if bintree._remote_has_index:
3552                                 remote_mtime = bintree._remotepkgs[self.pkg.cpv].get("MTIME")
3553                                 if remote_mtime is not None:
3554                                         try:
3555                                                 remote_mtime = long(remote_mtime)
3556                                         except ValueError:
3557                                                 pass
3558                                         else:
3559                                                 try:
3560                                                         local_mtime = long(os.stat(self.pkg_path).st_mtime)
3561                                                 except OSError:
3562                                                         pass
3563                                                 else:
3564                                                         if remote_mtime != local_mtime:
3565                                                                 try:
3566                                                                         os.utime(self.pkg_path,
3567                                                                                 (remote_mtime, remote_mtime))
3568                                                                 except OSError:
3569                                                                         pass
3570
3571                 if self.locked:
3572                         self.unlock()
3573
3574         def lock(self):
3575                 """
3576                 This raises an AlreadyLocked exception if lock() is called
3577                 while a lock is already held. In order to avoid this, call
3578                 unlock() or check whether the "locked" attribute is True
3579                 or False before calling lock().
3580                 """
3581                 if self._lock_obj is not None:
3582                         raise self.AlreadyLocked((self._lock_obj,))
3583
3584                 self._lock_obj = portage.locks.lockfile(
3585                         self.pkg_path, wantnewlockfile=1)
3586                 self.locked = True
3587
3588         class AlreadyLocked(portage.exception.PortageException):
3589                 pass
3590
3591         def unlock(self):
3592                 if self._lock_obj is None:
3593                         return
3594                 portage.locks.unlockfile(self._lock_obj)
3595                 self._lock_obj = None
3596                 self.locked = False
3597
3598 class BinpkgVerifier(AsynchronousTask):
3599         __slots__ = ("logfile", "pkg",)
3600
3601         def _start(self):
3602                 """
3603                 Note: Unlike a normal AsynchronousTask.start() method,
3604                 this one does all work is synchronously. The returncode
3605                 attribute will be set before it returns.
3606                 """
3607
3608                 pkg = self.pkg
3609                 root_config = pkg.root_config
3610                 bintree = root_config.trees["bintree"]
3611                 rval = os.EX_OK
3612                 stdout_orig = sys.stdout
3613                 stderr_orig = sys.stderr
3614                 log_file = None
3615                 if self.background and self.logfile is not None:
3616                         log_file = open(self.logfile, 'a')
3617                 try:
3618                         if log_file is not None:
3619                                 sys.stdout = log_file
3620                                 sys.stderr = log_file
3621                         try:
3622                                 bintree.digestCheck(pkg)
3623                         except portage.exception.FileNotFound:
3624                                 writemsg("!!! Fetching Binary failed " + \
3625                                         "for '%s'\n" % pkg.cpv, noiselevel=-1)
3626                                 rval = 1
3627                         except portage.exception.DigestException, e:
3628                                 writemsg("\n!!! Digest verification failed:\n",
3629                                         noiselevel=-1)
3630                                 writemsg("!!! %s\n" % e.value[0],
3631                                         noiselevel=-1)
3632                                 writemsg("!!! Reason: %s\n" % e.value[1],
3633                                         noiselevel=-1)
3634                                 writemsg("!!! Got: %s\n" % e.value[2],
3635                                         noiselevel=-1)
3636                                 writemsg("!!! Expected: %s\n" % e.value[3],
3637                                         noiselevel=-1)
3638                                 rval = 1
3639                         if rval != os.EX_OK:
3640                                 pkg_path = bintree.getname(pkg.cpv)
3641                                 head, tail = os.path.split(pkg_path)
3642                                 temp_filename = portage._checksum_failure_temp_file(head, tail)
3643                                 writemsg("File renamed to '%s'\n" % (temp_filename,),
3644                                         noiselevel=-1)
3645                 finally:
3646                         sys.stdout = stdout_orig
3647                         sys.stderr = stderr_orig
3648                         if log_file is not None:
3649                                 log_file.close()
3650
3651                 self.returncode = rval
3652                 self.wait()
3653
3654 class BinpkgPrefetcher(CompositeTask):
3655
3656         __slots__ = ("pkg",) + \
3657                 ("pkg_path", "_bintree",)
3658
3659         def _start(self):
3660                 self._bintree = self.pkg.root_config.trees["bintree"]
3661                 fetcher = BinpkgFetcher(background=self.background,
3662                         logfile=self.scheduler.fetch.log_file, pkg=self.pkg,
3663                         scheduler=self.scheduler)
3664                 self.pkg_path = fetcher.pkg_path
3665                 self._start_task(fetcher, self._fetcher_exit)
3666
3667         def _fetcher_exit(self, fetcher):
3668
3669                 if self._default_exit(fetcher) != os.EX_OK:
3670                         self.wait()
3671                         return
3672
3673                 verifier = BinpkgVerifier(background=self.background,
3674                         logfile=self.scheduler.fetch.log_file, pkg=self.pkg)
3675                 self._start_task(verifier, self._verifier_exit)
3676
3677         def _verifier_exit(self, verifier):
3678                 if self._default_exit(verifier) != os.EX_OK:
3679                         self.wait()
3680                         return
3681
3682                 self._bintree.inject(self.pkg.cpv, filename=self.pkg_path)
3683
3684                 self._current_task = None
3685                 self.returncode = os.EX_OK
3686                 self.wait()
3687
3688 class BinpkgExtractorAsync(SpawnProcess):
3689
3690         __slots__ = ("image_dir", "pkg", "pkg_path")
3691
3692         _shell_binary = portage.const.BASH_BINARY
3693
3694         def _start(self):
3695                 self.args = [self._shell_binary, "-c",
3696                         "bzip2 -dqc -- %s | tar -xp -C %s -f -" % \
3697                         (portage._shell_quote(self.pkg_path),
3698                         portage._shell_quote(self.image_dir))]
3699
3700                 self.env = self.pkg.root_config.settings.environ()
3701                 SpawnProcess._start(self)
3702
3703 class MergeListItem(CompositeTask):
3704
3705         """
3706         TODO: For parallel scheduling, everything here needs asynchronous
3707         execution support (start, poll, and wait methods).
3708         """
3709
3710         __slots__ = ("args_set",
3711                 "binpkg_opts", "build_opts", "config_pool", "emerge_opts",
3712                 "find_blockers", "logger", "mtimedb", "pkg",
3713                 "pkg_count", "pkg_to_replace", "prefetcher",
3714                 "settings", "statusMessage", "world_atom") + \
3715                 ("_install_task",)
3716
3717         def _start(self):
3718
3719                 pkg = self.pkg
3720                 build_opts = self.build_opts
3721
3722                 if pkg.installed:
3723                         # uninstall,  executed by self.merge()
3724                         self.returncode = os.EX_OK
3725                         self.wait()
3726                         return
3727
3728                 args_set = self.args_set
3729                 find_blockers = self.find_blockers
3730                 logger = self.logger
3731                 mtimedb = self.mtimedb
3732                 pkg_count = self.pkg_count
3733                 scheduler = self.scheduler
3734                 settings = self.settings
3735                 world_atom = self.world_atom
3736                 ldpath_mtimes = mtimedb["ldpath"]
3737
3738                 action_desc = "Emerging"
3739                 preposition = "for"
3740                 if pkg.type_name == "binary":
3741                         action_desc += " binary"
3742
3743                 if build_opts.fetchonly:
3744                         action_desc = "Fetching"
3745
3746                 msg = "%s (%s of %s) %s" % \
3747                         (action_desc,
3748                         colorize("MERGE_LIST_PROGRESS", str(pkg_count.curval)),
3749                         colorize("MERGE_LIST_PROGRESS", str(pkg_count.maxval)),
3750                         colorize("GOOD", pkg.cpv))
3751
3752                 portdb = pkg.root_config.trees["porttree"].dbapi
3753                 portdir_repo_name = portdb._repository_map.get(portdb.porttree_root)
3754                 if portdir_repo_name:
3755                         pkg_repo_name = pkg.metadata.get("repository")
3756                         if pkg_repo_name != portdir_repo_name:
3757                                 if not pkg_repo_name:
3758                                         pkg_repo_name = "unknown repo"
3759                                 msg += " from %s" % pkg_repo_name
3760
3761                 if pkg.root != "/":
3762                         msg += " %s %s" % (preposition, pkg.root)
3763
3764                 if not build_opts.pretend:
3765                         self.statusMessage(msg)
3766                         logger.log(" >>> emerge (%s of %s) %s to %s" % \
3767                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
3768
3769                 if pkg.type_name == "ebuild":
3770
3771                         build = EbuildBuild(args_set=args_set,
3772                                 background=self.background,
3773                                 config_pool=self.config_pool,
3774                                 find_blockers=find_blockers,
3775                                 ldpath_mtimes=ldpath_mtimes, logger=logger,
3776                                 opts=build_opts, pkg=pkg, pkg_count=pkg_count,
3777                                 prefetcher=self.prefetcher, scheduler=scheduler,
3778                                 settings=settings, world_atom=world_atom)
3779
3780                         self._install_task = build
3781                         self._start_task(build, self._default_final_exit)
3782                         return
3783
3784                 elif pkg.type_name == "binary":
3785
3786                         binpkg = Binpkg(background=self.background,
3787                                 find_blockers=find_blockers,
3788                                 ldpath_mtimes=ldpath_mtimes, logger=logger,
3789                                 opts=self.binpkg_opts, pkg=pkg, pkg_count=pkg_count,
3790                                 prefetcher=self.prefetcher, settings=settings,
3791                                 scheduler=scheduler, world_atom=world_atom)
3792
3793                         self._install_task = binpkg
3794                         self._start_task(binpkg, self._default_final_exit)
3795                         return
3796
3797         def _poll(self):
3798                 self._install_task.poll()
3799                 return self.returncode
3800
3801         def _wait(self):
3802                 self._install_task.wait()
3803                 return self.returncode
3804
3805         def merge(self):
3806
3807                 pkg = self.pkg
3808                 build_opts = self.build_opts
3809                 find_blockers = self.find_blockers
3810                 logger = self.logger
3811                 mtimedb = self.mtimedb
3812                 pkg_count = self.pkg_count
3813                 prefetcher = self.prefetcher
3814                 scheduler = self.scheduler
3815                 settings = self.settings
3816                 world_atom = self.world_atom
3817                 ldpath_mtimes = mtimedb["ldpath"]
3818
3819                 if pkg.installed:
3820                         if not (build_opts.buildpkgonly or \
3821                                 build_opts.fetchonly or build_opts.pretend):
3822
3823                                 uninstall = PackageUninstall(background=self.background,
3824                                         ldpath_mtimes=ldpath_mtimes, opts=self.emerge_opts,
3825                                         pkg=pkg, scheduler=scheduler, settings=settings)
3826
3827                                 uninstall.start()
3828                                 retval = uninstall.wait()
3829                                 if retval != os.EX_OK:
3830                                         return retval
3831                         return os.EX_OK
3832
3833                 if build_opts.fetchonly or \
3834                         build_opts.buildpkgonly:
3835                         return self.returncode
3836
3837                 retval = self._install_task.install()
3838                 return retval
3839
3840 class PackageMerge(AsynchronousTask):
3841         """
3842         TODO: Implement asynchronous merge so that the scheduler can
3843         run while a merge is executing.
3844         """
3845
3846         __slots__ = ("merge",)
3847
3848         def _start(self):
3849
3850                 pkg = self.merge.pkg
3851                 pkg_count = self.merge.pkg_count
3852
3853                 if pkg.installed:
3854                         action_desc = "Uninstalling"
3855                         preposition = "from"
3856                 else:
3857                         action_desc = "Installing"
3858                         preposition = "to"
3859
3860                 msg = "%s %s" % (action_desc, colorize("GOOD", pkg.cpv))
3861
3862                 if pkg.root != "/":
3863                         msg += " %s %s" % (preposition, pkg.root)
3864
3865                 if not self.merge.build_opts.fetchonly and \
3866                         not self.merge.build_opts.pretend and \
3867                         not self.merge.build_opts.buildpkgonly:
3868                         self.merge.statusMessage(msg)
3869
3870                 self.returncode = self.merge.merge()
3871                 self.wait()
3872
3873 class DependencyArg(object):
3874         def __init__(self, arg=None, root_config=None):
3875                 self.arg = arg
3876                 self.root_config = root_config
3877
3878         def __str__(self):
3879                 return str(self.arg)
3880
3881 class AtomArg(DependencyArg):
3882         def __init__(self, atom=None, **kwargs):
3883                 DependencyArg.__init__(self, **kwargs)
3884                 self.atom = atom
3885                 if not isinstance(self.atom, portage.dep.Atom):
3886                         self.atom = portage.dep.Atom(self.atom)
3887                 self.set = (self.atom, )
3888
3889 class PackageArg(DependencyArg):
3890         def __init__(self, package=None, **kwargs):
3891                 DependencyArg.__init__(self, **kwargs)
3892                 self.package = package
3893                 self.atom = portage.dep.Atom("=" + package.cpv)
3894                 self.set = (self.atom, )
3895
3896 class SetArg(DependencyArg):
3897         def __init__(self, set=None, **kwargs):
3898                 DependencyArg.__init__(self, **kwargs)
3899                 self.set = set
3900                 self.name = self.arg[len(SETPREFIX):]
3901
3902 class Dependency(SlotObject):
3903         __slots__ = ("atom", "blocker", "depth",
3904                 "parent", "onlydeps", "priority", "root")
3905         def __init__(self, **kwargs):
3906                 SlotObject.__init__(self, **kwargs)
3907                 if self.priority is None:
3908                         self.priority = DepPriority()
3909                 if self.depth is None:
3910                         self.depth = 0
3911
3912 class BlockerCache(DictMixin):
3913         """This caches blockers of installed packages so that dep_check does not
3914         have to be done for every single installed package on every invocation of
3915         emerge.  The cache is invalidated whenever it is detected that something
3916         has changed that might alter the results of dep_check() calls:
3917                 1) the set of installed packages (including COUNTER) has changed
3918                 2) the old-style virtuals have changed
3919         """
3920
3921         # Number of uncached packages to trigger cache update, since
3922         # it's wasteful to update it for every vdb change.
3923         _cache_threshold = 5
3924
3925         class BlockerData(object):
3926
3927                 __slots__ = ("__weakref__", "atoms", "counter")
3928
3929                 def __init__(self, counter, atoms):
3930                         self.counter = counter
3931                         self.atoms = atoms
3932
3933         def __init__(self, myroot, vardb):
3934                 self._vardb = vardb
3935                 self._virtuals = vardb.settings.getvirtuals()
3936                 self._cache_filename = os.path.join(myroot,
3937                         portage.CACHE_PATH.lstrip(os.path.sep), "vdb_blockers.pickle")
3938                 self._cache_version = "1"
3939                 self._cache_data = None
3940                 self._modified = set()
3941                 self._load()
3942
3943         def _load(self):
3944                 try:
3945                         f = open(self._cache_filename)
3946                         mypickle = pickle.Unpickler(f)
3947                         mypickle.find_global = None
3948                         self._cache_data = mypickle.load()
3949                         f.close()
3950                         del f
3951                 except (IOError, OSError, EOFError, pickle.UnpicklingError), e:
3952                         if isinstance(e, pickle.UnpicklingError):
3953                                 writemsg("!!! Error loading '%s': %s\n" % \
3954                                         (self._cache_filename, str(e)), noiselevel=-1)
3955                         del e
3956
3957                 cache_valid = self._cache_data and \
3958                         isinstance(self._cache_data, dict) and \
3959                         self._cache_data.get("version") == self._cache_version and \
3960                         isinstance(self._cache_data.get("blockers"), dict)
3961                 if cache_valid:
3962                         # Validate all the atoms and counters so that
3963                         # corruption is detected as soon as possible.
3964                         invalid_items = set()
3965                         for k, v in self._cache_data["blockers"].iteritems():
3966                                 if not isinstance(k, basestring):
3967                                         invalid_items.add(k)
3968                                         continue
3969                                 try:
3970                                         if portage.catpkgsplit(k) is None:
3971                                                 invalid_items.add(k)
3972                                                 continue
3973                                 except portage.exception.InvalidData:
3974                                         invalid_items.add(k)
3975                                         continue
3976                                 if not isinstance(v, tuple) or \
3977                                         len(v) != 2:
3978                                         invalid_items.add(k)
3979                                         continue
3980                                 counter, atoms = v
3981                                 if not isinstance(counter, (int, long)):
3982                                         invalid_items.add(k)
3983                                         continue
3984                                 if not isinstance(atoms, (list, tuple)):
3985                                         invalid_items.add(k)
3986                                         continue
3987                                 invalid_atom = False
3988                                 for atom in atoms:
3989                                         if not isinstance(atom, basestring):
3990                                                 invalid_atom = True
3991                                                 break
3992                                         if atom[:1] != "!" or \
3993                                                 not portage.isvalidatom(
3994                                                 atom, allow_blockers=True):
3995                                                 invalid_atom = True
3996                                                 break
3997                                 if invalid_atom:
3998                                         invalid_items.add(k)
3999                                         continue
4000
4001                         for k in invalid_items:
4002                                 del self._cache_data["blockers"][k]
4003                         if not self._cache_data["blockers"]:
4004                                 cache_valid = False
4005
4006                 if not cache_valid:
4007                         self._cache_data = {"version":self._cache_version}
4008                         self._cache_data["blockers"] = {}
4009                         self._cache_data["virtuals"] = self._virtuals
4010                 self._modified.clear()
4011
4012         def flush(self):
4013                 """If the current user has permission and the internal blocker cache
4014                 been updated, save it to disk and mark it unmodified.  This is called
4015                 by emerge after it has proccessed blockers for all installed packages.
4016                 Currently, the cache is only written if the user has superuser
4017                 privileges (since that's required to obtain a lock), but all users
4018                 have read access and benefit from faster blocker lookups (as long as
4019                 the entire cache is still valid).  The cache is stored as a pickled
4020                 dict object with the following format:
4021
4022                 {
4023                         version : "1",
4024                         "blockers" : {cpv1:(counter,(atom1, atom2...)), cpv2...},
4025                         "virtuals" : vardb.settings.getvirtuals()
4026                 }
4027                 """
4028                 if len(self._modified) >= self._cache_threshold and \
4029                         secpass >= 2:
4030                         try:
4031                                 f = portage.util.atomic_ofstream(self._cache_filename)
4032                                 pickle.dump(self._cache_data, f, -1)
4033                                 f.close()
4034                                 portage.util.apply_secpass_permissions(
4035                                         self._cache_filename, gid=portage.portage_gid, mode=0644)
4036                         except (IOError, OSError), e:
4037                                 pass
4038                         self._modified.clear()
4039
4040         def __setitem__(self, cpv, blocker_data):
4041                 """
4042                 Update the cache and mark it as modified for a future call to
4043                 self.flush().
4044
4045                 @param cpv: Package for which to cache blockers.
4046                 @type cpv: String
4047                 @param blocker_data: An object with counter and atoms attributes.
4048                 @type blocker_data: BlockerData
4049                 """
4050                 self._cache_data["blockers"][cpv] = \
4051                         (blocker_data.counter, tuple(str(x) for x in blocker_data.atoms))
4052                 self._modified.add(cpv)
4053
4054         def __iter__(self):
4055                 if self._cache_data is None:
4056                         # triggered by python-trace
4057                         return iter([])
4058                 return iter(self._cache_data["blockers"])
4059
4060         def __delitem__(self, cpv):
4061                 del self._cache_data["blockers"][cpv]
4062
4063         def __getitem__(self, cpv):
4064                 """
4065                 @rtype: BlockerData
4066                 @returns: An object with counter and atoms attributes.
4067                 """
4068                 return self.BlockerData(*self._cache_data["blockers"][cpv])
4069
4070         def keys(self):
4071                 """This needs to be implemented so that self.__repr__() doesn't raise
4072                 an AttributeError."""
4073                 return list(self)
4074
4075 class BlockerDB(object):
4076
4077         def __init__(self, root_config):
4078                 self._root_config = root_config
4079                 self._vartree = root_config.trees["vartree"]
4080                 self._portdb = root_config.trees["porttree"].dbapi
4081
4082                 self._dep_check_trees = None
4083                 self._fake_vartree = None
4084
4085         def _get_fake_vartree(self, acquire_lock=0):
4086                 fake_vartree = self._fake_vartree
4087                 if fake_vartree is None:
4088                         fake_vartree = FakeVartree(self._root_config,
4089                                 acquire_lock=acquire_lock)
4090                         self._fake_vartree = fake_vartree
4091                         self._dep_check_trees = { self._vartree.root : {
4092                                 "porttree"    :  fake_vartree,
4093                                 "vartree"     :  fake_vartree,
4094                         }}
4095                 else:
4096                         fake_vartree.sync(acquire_lock=acquire_lock)
4097                 return fake_vartree
4098
4099         def findInstalledBlockers(self, new_pkg, acquire_lock=0):
4100                 blocker_cache = BlockerCache(self._vartree.root, self._vartree.dbapi)
4101                 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
4102                 settings = self._vartree.settings
4103                 stale_cache = set(blocker_cache)
4104                 fake_vartree = self._get_fake_vartree(acquire_lock=acquire_lock)
4105                 dep_check_trees = self._dep_check_trees
4106                 vardb = fake_vartree.dbapi
4107                 installed_pkgs = list(vardb)
4108
4109                 for inst_pkg in installed_pkgs:
4110                         stale_cache.discard(inst_pkg.cpv)
4111                         cached_blockers = blocker_cache.get(inst_pkg.cpv)
4112                         if cached_blockers is not None and \
4113                                 cached_blockers.counter != long(inst_pkg.metadata["COUNTER"]):
4114                                 cached_blockers = None
4115                         if cached_blockers is not None:
4116                                 blocker_atoms = cached_blockers.atoms
4117                         else:
4118                                 # Use aux_get() to trigger FakeVartree global
4119                                 # updates on *DEPEND when appropriate.
4120                                 depstr = " ".join(vardb.aux_get(inst_pkg.cpv, dep_keys))
4121                                 try:
4122                                         portage.dep._dep_check_strict = False
4123                                         success, atoms = portage.dep_check(depstr,
4124                                                 vardb, settings, myuse=inst_pkg.use.enabled,
4125                                                 trees=dep_check_trees, myroot=inst_pkg.root)
4126                                 finally:
4127                                         portage.dep._dep_check_strict = True
4128                                 if not success:
4129                                         pkg_location = os.path.join(inst_pkg.root,
4130                                                 portage.VDB_PATH, inst_pkg.category, inst_pkg.pf)
4131                                         portage.writemsg("!!! %s/*DEPEND: %s\n" % \
4132                                                 (pkg_location, atoms), noiselevel=-1)
4133                                         continue
4134
4135                                 blocker_atoms = [atom for atom in atoms \
4136                                         if atom.startswith("!")]
4137                                 blocker_atoms.sort()
4138                                 counter = long(inst_pkg.metadata["COUNTER"])
4139                                 blocker_cache[inst_pkg.cpv] = \
4140                                         blocker_cache.BlockerData(counter, blocker_atoms)
4141                 for cpv in stale_cache:
4142                         del blocker_cache[cpv]
4143                 blocker_cache.flush()
4144
4145                 blocker_parents = digraph()
4146                 blocker_atoms = []
4147                 for pkg in installed_pkgs:
4148                         for blocker_atom in blocker_cache[pkg.cpv].atoms:
4149                                 blocker_atom = blocker_atom.lstrip("!")
4150                                 blocker_atoms.append(blocker_atom)
4151                                 blocker_parents.add(blocker_atom, pkg)
4152
4153                 blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
4154                 blocking_pkgs = set()
4155                 for atom in blocker_atoms.iterAtomsForPackage(new_pkg):
4156                         blocking_pkgs.update(blocker_parents.parent_nodes(atom))
4157
4158                 # Check for blockers in the other direction.
4159                 depstr = " ".join(new_pkg.metadata[k] for k in dep_keys)
4160                 try:
4161                         portage.dep._dep_check_strict = False
4162                         success, atoms = portage.dep_check(depstr,
4163                                 vardb, settings, myuse=new_pkg.use.enabled,
4164                                 trees=dep_check_trees, myroot=new_pkg.root)
4165                 finally:
4166                         portage.dep._dep_check_strict = True
4167                 if not success:
4168                         # We should never get this far with invalid deps.
4169                         show_invalid_depstring_notice(new_pkg, depstr, atoms)
4170                         assert False
4171
4172                 blocker_atoms = [atom.lstrip("!") for atom in atoms \
4173                         if atom[:1] == "!"]
4174                 if blocker_atoms:
4175                         blocker_atoms = InternalPackageSet(initial_atoms=blocker_atoms)
4176                         for inst_pkg in installed_pkgs:
4177                                 try:
4178                                         blocker_atoms.iterAtomsForPackage(inst_pkg).next()
4179                                 except (portage.exception.InvalidDependString, StopIteration):
4180                                         continue
4181                                 blocking_pkgs.add(inst_pkg)
4182
4183                 return blocking_pkgs
4184
4185 def show_invalid_depstring_notice(parent_node, depstring, error_msg):
4186
4187         msg1 = "\n\n!!! Invalid or corrupt dependency specification: " + \
4188                 "\n\n%s\n\n%s\n\n%s\n\n" % (error_msg, parent_node, depstring)
4189         p_type, p_root, p_key, p_status = parent_node
4190         msg = []
4191         if p_status == "nomerge":
4192                 category, pf = portage.catsplit(p_key)
4193                 pkg_location = os.path.join(p_root, portage.VDB_PATH, category, pf)
4194                 msg.append("Portage is unable to process the dependencies of the ")
4195                 msg.append("'%s' package. " % p_key)
4196                 msg.append("In order to correct this problem, the package ")
4197                 msg.append("should be uninstalled, reinstalled, or upgraded. ")
4198                 msg.append("As a temporary workaround, the --nodeps option can ")
4199                 msg.append("be used to ignore all dependencies.  For reference, ")
4200                 msg.append("the problematic dependencies can be found in the ")
4201                 msg.append("*DEPEND files located in '%s/'." % pkg_location)
4202         else:
4203                 msg.append("This package can not be installed. ")
4204                 msg.append("Please notify the '%s' package maintainer " % p_key)
4205                 msg.append("about this problem.")
4206
4207         msg2 = "".join("%s\n" % line for line in textwrap.wrap("".join(msg), 72))
4208         writemsg_level(msg1 + msg2, level=logging.ERROR, noiselevel=-1)
4209
4210 class PackageVirtualDbapi(portage.dbapi):
4211         """
4212         A dbapi-like interface class that represents the state of the installed
4213         package database as new packages are installed, replacing any packages
4214         that previously existed in the same slot. The main difference between
4215         this class and fakedbapi is that this one uses Package instances
4216         internally (passed in via cpv_inject() and cpv_remove() calls).
4217         """
4218         def __init__(self, settings):
4219                 portage.dbapi.__init__(self)
4220                 self.settings = settings
4221                 self._match_cache = {}
4222                 self._cp_map = {}
4223                 self._cpv_map = {}
4224
4225         def clear(self):
4226                 """
4227                 Remove all packages.
4228                 """
4229                 if self._cpv_map:
4230                         self._clear_cache()
4231                         self._cp_map.clear()
4232                         self._cpv_map.clear()
4233
4234         def copy(self):
4235                 obj = PackageVirtualDbapi(self.settings)
4236                 obj._match_cache = self._match_cache.copy()
4237                 obj._cp_map = self._cp_map.copy()
4238                 for k, v in obj._cp_map.iteritems():
4239                         obj._cp_map[k] = v[:]
4240                 obj._cpv_map = self._cpv_map.copy()
4241                 return obj
4242
4243         def __iter__(self):
4244                 return self._cpv_map.itervalues()
4245
4246         def __contains__(self, item):
4247                 existing = self._cpv_map.get(item.cpv)
4248                 if existing is not None and \
4249                         existing == item:
4250                         return True
4251                 return False
4252
4253         def get(self, item, default=None):
4254                 cpv = getattr(item, "cpv", None)
4255                 if cpv is None:
4256                         if len(item) != 4:
4257                                 return default
4258                         type_name, root, cpv, operation = item
4259
4260                 existing = self._cpv_map.get(cpv)
4261                 if existing is not None and \
4262                         existing == item:
4263                         return existing
4264                 return default
4265
4266         def match_pkgs(self, atom):
4267                 return [self._cpv_map[cpv] for cpv in self.match(atom)]
4268
4269         def _clear_cache(self):
4270                 if self._categories is not None:
4271                         self._categories = None
4272                 if self._match_cache:
4273                         self._match_cache = {}
4274
4275         def match(self, origdep, use_cache=1):
4276                 result = self._match_cache.get(origdep)
4277                 if result is not None:
4278                         return result[:]
4279                 result = portage.dbapi.match(self, origdep, use_cache=use_cache)
4280                 self._match_cache[origdep] = result
4281                 return result[:]
4282
4283         def cpv_exists(self, cpv):
4284                 return cpv in self._cpv_map
4285
4286         def cp_list(self, mycp, use_cache=1):
4287                 cachelist = self._match_cache.get(mycp)
4288                 # cp_list() doesn't expand old-style virtuals
4289                 if cachelist and cachelist[0].startswith(mycp):
4290                         return cachelist[:]
4291                 cpv_list = self._cp_map.get(mycp)
4292                 if cpv_list is None:
4293                         cpv_list = []
4294                 else:
4295                         cpv_list = [pkg.cpv for pkg in cpv_list]
4296                 self._cpv_sort_ascending(cpv_list)
4297                 if not (not cpv_list and mycp.startswith("virtual/")):
4298                         self._match_cache[mycp] = cpv_list
4299                 return cpv_list[:]
4300
4301         def cp_all(self):
4302                 return list(self._cp_map)
4303
4304         def cpv_all(self):
4305                 return list(self._cpv_map)
4306
4307         def cpv_inject(self, pkg):
4308                 cp_list = self._cp_map.get(pkg.cp)
4309                 if cp_list is None:
4310                         cp_list = []
4311                         self._cp_map[pkg.cp] = cp_list
4312                 e_pkg = self._cpv_map.get(pkg.cpv)
4313                 if e_pkg is not None:
4314                         if e_pkg == pkg:
4315                                 return
4316                         self.cpv_remove(e_pkg)
4317                 for e_pkg in cp_list:
4318                         if e_pkg.slot_atom == pkg.slot_atom:
4319                                 if e_pkg == pkg:
4320                                         return
4321                                 self.cpv_remove(e_pkg)
4322                                 break
4323                 cp_list.append(pkg)
4324                 self._cpv_map[pkg.cpv] = pkg
4325                 self._clear_cache()
4326
4327         def cpv_remove(self, pkg):
4328                 old_pkg = self._cpv_map.get(pkg.cpv)
4329                 if old_pkg != pkg:
4330                         raise KeyError(pkg)
4331                 self._cp_map[pkg.cp].remove(pkg)
4332                 del self._cpv_map[pkg.cpv]
4333                 self._clear_cache()
4334
4335         def aux_get(self, cpv, wants):
4336                 metadata = self._cpv_map[cpv].metadata
4337                 return [metadata.get(x, "") for x in wants]
4338
4339         def aux_update(self, cpv, values):
4340                 self._cpv_map[cpv].metadata.update(values)
4341                 self._clear_cache()
4342
4343 class depgraph(object):
4344
4345         pkg_tree_map = RootConfig.pkg_tree_map
4346
4347         _dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
4348
4349         def __init__(self, settings, trees, myopts, myparams, spinner):
4350                 self.settings = settings
4351                 self.target_root = settings["ROOT"]
4352                 self.myopts = myopts
4353                 self.myparams = myparams
4354                 self.edebug = 0
4355                 if settings.get("PORTAGE_DEBUG", "") == "1":
4356                         self.edebug = 1
4357                 self.spinner = spinner
4358                 self._running_root = trees["/"]["root_config"]
4359                 self._opts_no_restart = Scheduler._opts_no_restart
4360                 self.pkgsettings = {}
4361                 # Maps slot atom to package for each Package added to the graph.
4362                 self._slot_pkg_map = {}
4363                 # Maps nodes to the reasons they were selected for reinstallation.
4364                 self._reinstall_nodes = {}
4365                 self.mydbapi = {}
4366                 self.trees = {}
4367                 self._trees_orig = trees
4368                 self.roots = {}
4369                 # Contains a filtered view of preferred packages that are selected
4370                 # from available repositories.
4371                 self._filtered_trees = {}
4372                 # Contains installed packages and new packages that have been added
4373                 # to the graph.
4374                 self._graph_trees = {}
4375                 # All Package instances
4376                 self._pkg_cache = {}
4377                 for myroot in trees:
4378                         self.trees[myroot] = {}
4379                         # Create a RootConfig instance that references
4380                         # the FakeVartree instead of the real one.
4381                         self.roots[myroot] = RootConfig(
4382                                 trees[myroot]["vartree"].settings,
4383                                 self.trees[myroot],
4384                                 trees[myroot]["root_config"].setconfig)
4385                         for tree in ("porttree", "bintree"):
4386                                 self.trees[myroot][tree] = trees[myroot][tree]
4387                         self.trees[myroot]["vartree"] = \
4388                                 FakeVartree(trees[myroot]["root_config"],
4389                                         pkg_cache=self._pkg_cache)
4390                         self.pkgsettings[myroot] = portage.config(
4391                                 clone=self.trees[myroot]["vartree"].settings)
4392                         self._slot_pkg_map[myroot] = {}
4393                         vardb = self.trees[myroot]["vartree"].dbapi
4394                         preload_installed_pkgs = "--nodeps" not in self.myopts and \
4395                                 "--buildpkgonly" not in self.myopts
4396                         # This fakedbapi instance will model the state that the vdb will
4397                         # have after new packages have been installed.
4398                         fakedb = PackageVirtualDbapi(vardb.settings)
4399                         if preload_installed_pkgs:
4400                                 for pkg in vardb:
4401                                         self.spinner.update()
4402                                         # This triggers metadata updates via FakeVartree.
4403                                         vardb.aux_get(pkg.cpv, [])
4404                                         fakedb.cpv_inject(pkg)
4405
4406                         # Now that the vardb state is cached in our FakeVartree,
4407                         # we won't be needing the real vartree cache for awhile.
4408                         # To make some room on the heap, clear the vardbapi
4409                         # caches.
4410                         trees[myroot]["vartree"].dbapi._clear_cache()
4411                         gc.collect()
4412
4413                         self.mydbapi[myroot] = fakedb
4414                         def graph_tree():
4415                                 pass
4416                         graph_tree.dbapi = fakedb
4417                         self._graph_trees[myroot] = {}
4418                         self._filtered_trees[myroot] = {}
4419                         # Substitute the graph tree for the vartree in dep_check() since we
4420                         # want atom selections to be consistent with package selections
4421                         # have already been made.
4422                         self._graph_trees[myroot]["porttree"]   = graph_tree
4423                         self._graph_trees[myroot]["vartree"]    = graph_tree
4424                         def filtered_tree():
4425                                 pass
4426                         filtered_tree.dbapi = self._dep_check_composite_db(self, myroot)
4427                         self._filtered_trees[myroot]["porttree"] = filtered_tree
4428
4429                         # Passing in graph_tree as the vartree here could lead to better
4430                         # atom selections in some cases by causing atoms for packages that
4431                         # have been added to the graph to be preferred over other choices.
4432                         # However, it can trigger atom selections that result in
4433                         # unresolvable direct circular dependencies. For example, this
4434                         # happens with gwydion-dylan which depends on either itself or
4435                         # gwydion-dylan-bin. In case gwydion-dylan is not yet installed,
4436                         # gwydion-dylan-bin needs to be selected in order to avoid a
4437                         # an unresolvable direct circular dependency.
4438                         #
4439                         # To solve the problem described above, pass in "graph_db" so that
4440                         # packages that have been added to the graph are distinguishable
4441                         # from other available packages and installed packages. Also, pass
4442                         # the parent package into self._select_atoms() calls so that
4443                         # unresolvable direct circular dependencies can be detected and
4444                         # avoided when possible.
4445                         self._filtered_trees[myroot]["graph_db"] = graph_tree.dbapi
4446                         self._filtered_trees[myroot]["vartree"] = self.trees[myroot]["vartree"]
4447
4448                         dbs = []
4449                         portdb = self.trees[myroot]["porttree"].dbapi
4450                         bindb  = self.trees[myroot]["bintree"].dbapi
4451                         vardb  = self.trees[myroot]["vartree"].dbapi
4452                         #               (db, pkg_type, built, installed, db_keys)
4453                         if "--usepkgonly" not in self.myopts:
4454                                 db_keys = list(portdb._aux_cache_keys)
4455                                 dbs.append((portdb, "ebuild", False, False, db_keys))
4456                         if "--usepkg" in self.myopts:
4457                                 db_keys = list(bindb._aux_cache_keys)
4458                                 dbs.append((bindb,  "binary", True, False, db_keys))
4459                         db_keys = list(trees[myroot]["vartree"].dbapi._aux_cache_keys)
4460                         dbs.append((vardb, "installed", True, True, db_keys))
4461                         self._filtered_trees[myroot]["dbs"] = dbs
4462                         if "--usepkg" in self.myopts:
4463                                 self.trees[myroot]["bintree"].populate(
4464                                         "--getbinpkg" in self.myopts,
4465                                         "--getbinpkgonly" in self.myopts)
4466                 del trees
4467
4468                 self.digraph=portage.digraph()
4469                 # contains all sets added to the graph
4470                 self._sets = {}
4471                 # contains atoms given as arguments
4472                 self._sets["args"] = InternalPackageSet()
4473                 # contains all atoms from all sets added to the graph, including
4474                 # atoms given as arguments
4475                 self._set_atoms = InternalPackageSet()
4476                 self._atom_arg_map = {}
4477                 # contains all nodes pulled in by self._set_atoms
4478                 self._set_nodes = set()
4479                 # Contains only Blocker -> Uninstall edges
4480                 self._blocker_uninstalls = digraph()
4481                 # Contains only Package -> Blocker edges
4482                 self._blocker_parents = digraph()
4483                 # Contains only irrelevant Package -> Blocker edges
4484                 self._irrelevant_blockers = digraph()
4485                 # Contains only unsolvable Package -> Blocker edges
4486                 self._unsolvable_blockers = digraph()
4487                 # Contains all Blocker -> Blocked Package edges
4488                 self._blocked_pkgs = digraph()
4489                 # Contains world packages that have been protected from
4490                 # uninstallation but may not have been added to the graph
4491                 # if the graph is not complete yet.
4492                 self._blocked_world_pkgs = {}
4493                 self._slot_collision_info = {}
4494                 # Slot collision nodes are not allowed to block other packages since
4495                 # blocker validation is only able to account for one package per slot.
4496                 self._slot_collision_nodes = set()
4497                 self._parent_atoms = {}
4498                 self._slot_conflict_parent_atoms = set()
4499                 self._serialized_tasks_cache = None
4500                 self._scheduler_graph = None
4501                 self._displayed_list = None
4502                 self._pprovided_args = []
4503                 self._missing_args = []
4504                 self._masked_installed = set()
4505                 self._unsatisfied_deps_for_display = []
4506                 self._unsatisfied_blockers_for_display = None
4507                 self._circular_deps_for_display = None
4508                 self._dep_stack = []
4509                 self._unsatisfied_deps = []
4510                 self._initially_unsatisfied_deps = []
4511                 self._ignored_deps = []
4512                 self._required_set_names = set(["system", "world"])
4513                 self._select_atoms = self._select_atoms_highest_available
4514                 self._select_package = self._select_pkg_highest_available
4515                 self._highest_pkg_cache = {}
4516
4517         def _show_slot_collision_notice(self):
4518                 """Show an informational message advising the user to mask one of the
4519                 the packages. In some cases it may be possible to resolve this
4520                 automatically, but support for backtracking (removal nodes that have
4521                 already been selected) will be required in order to handle all possible
4522                 cases.
4523                 """
4524
4525                 if not self._slot_collision_info:
4526                         return
4527
4528                 self._show_merge_list()
4529
4530                 msg = []
4531                 msg.append("\n!!! Multiple package instances within a single " + \
4532                         "package slot have been pulled\n")
4533                 msg.append("!!! into the dependency graph, resulting" + \
4534                         " in a slot conflict:\n\n")
4535                 indent = "  "
4536                 # Max number of parents shown, to avoid flooding the display.
4537                 max_parents = 3
4538                 explanation_columns = 70
4539                 explanations = 0
4540                 for (slot_atom, root), slot_nodes \
4541                         in self._slot_collision_info.iteritems():
4542                         msg.append(str(slot_atom))
4543                         msg.append("\n\n")
4544
4545                         for node in slot_nodes:
4546                                 msg.append(indent)
4547                                 msg.append(str(node))
4548                                 parent_atoms = self._parent_atoms.get(node)
4549                                 if parent_atoms:
4550                                         pruned_list = set()
4551                                         # Prefer conflict atoms over others.
4552                                         for parent_atom in parent_atoms:
4553                                                 if len(pruned_list) >= max_parents:
4554                                                         break
4555                                                 if parent_atom in self._slot_conflict_parent_atoms:
4556                                                         pruned_list.add(parent_atom)
4557
4558                                         # If this package was pulled in by conflict atoms then
4559                                         # show those alone since those are the most interesting.
4560                                         if not pruned_list:
4561                                                 # When generating the pruned list, prefer instances
4562                                                 # of DependencyArg over instances of Package.
4563                                                 for parent_atom in parent_atoms:
4564                                                         if len(pruned_list) >= max_parents:
4565                                                                 break
4566                                                         parent, atom = parent_atom
4567                                                         if isinstance(parent, DependencyArg):
4568                                                                 pruned_list.add(parent_atom)
4569                                                 # Prefer Packages instances that themselves have been
4570                                                 # pulled into collision slots.
4571                                                 for parent_atom in parent_atoms:
4572                                                         if len(pruned_list) >= max_parents:
4573                                                                 break
4574                                                         parent, atom = parent_atom
4575                                                         if isinstance(parent, Package) and \
4576                                                                 (parent.slot_atom, parent.root) \
4577                                                                 in self._slot_collision_info:
4578                                                                 pruned_list.add(parent_atom)
4579                                                 for parent_atom in parent_atoms:
4580                                                         if len(pruned_list) >= max_parents:
4581                                                                 break
4582                                                         pruned_list.add(parent_atom)
4583                                         omitted_parents = len(parent_atoms) - len(pruned_list)
4584                                         parent_atoms = pruned_list
4585                                         msg.append(" pulled in by\n")
4586                                         for parent_atom in parent_atoms:
4587                                                 parent, atom = parent_atom
4588                                                 msg.append(2*indent)
4589                                                 if isinstance(parent,
4590                                                         (PackageArg, AtomArg)):
4591                                                         # For PackageArg and AtomArg types, it's
4592                                                         # redundant to display the atom attribute.
4593                                                         msg.append(str(parent))
4594                                                 else:
4595                                                         # Display the specific atom from SetArg or
4596                                                         # Package types.
4597                                                         msg.append("%s required by %s" % (atom, parent))
4598                                                 msg.append("\n")
4599                                         if omitted_parents:
4600                                                 msg.append(2*indent)
4601                                                 msg.append("(and %d more)\n" % omitted_parents)
4602                                 else:
4603                                         msg.append(" (no parents)\n")
4604                                 msg.append("\n")
4605                         explanation = self._slot_conflict_explanation(slot_nodes)
4606                         if explanation:
4607                                 explanations += 1
4608                                 msg.append(indent + "Explanation:\n\n")
4609                                 for line in textwrap.wrap(explanation, explanation_columns):
4610                                         msg.append(2*indent + line + "\n")
4611                                 msg.append("\n")
4612                 msg.append("\n")
4613                 sys.stderr.write("".join(msg))
4614                 sys.stderr.flush()
4615
4616                 explanations_for_all = explanations == len(self._slot_collision_info)
4617
4618                 if explanations_for_all or "--quiet" in self.myopts:
4619                         return
4620
4621                 msg = []
4622                 msg.append("It may be possible to solve this problem ")
4623                 msg.append("by using package.mask to prevent one of ")
4624                 msg.append("those packages from being selected. ")
4625                 msg.append("However, it is also possible that conflicting ")
4626                 msg.append("dependencies exist such that they are impossible to ")
4627                 msg.append("satisfy simultaneously.  If such a conflict exists in ")
4628                 msg.append("the dependencies of two different packages, then those ")
4629                 msg.append("packages can not be installed simultaneously.")
4630
4631                 from formatter import AbstractFormatter, DumbWriter
4632                 f = AbstractFormatter(DumbWriter(sys.stderr, maxcol=72))
4633                 for x in msg:
4634                         f.add_flowing_data(x)
4635                 f.end_paragraph(1)
4636
4637                 msg = []
4638                 msg.append("For more information, see MASKED PACKAGES ")
4639                 msg.append("section in the emerge man page or refer ")
4640                 msg.append("to the Gentoo Handbook.")
4641                 for x in msg:
4642                         f.add_flowing_data(x)
4643                 f.end_paragraph(1)
4644                 f.writer.flush()
4645
4646         def _slot_conflict_explanation(self, slot_nodes):
4647                 """
4648                 When a slot conflict occurs due to USE deps, there are a few
4649                 different cases to consider:
4650
4651                 1) New USE are correctly set but --newuse wasn't requested so an
4652                    installed package with incorrect USE happened to get pulled
4653                    into graph before the new one.
4654
4655                 2) New USE are incorrectly set but an installed package has correct
4656                    USE so it got pulled into the graph, and a new instance also got
4657                    pulled in due to --newuse or an upgrade.
4658
4659                 3) Multiple USE deps exist that can't be satisfied simultaneously,
4660                    and multiple package instances got pulled into the same slot to
4661                    satisfy the conflicting deps.
4662
4663                 Currently, explanations and suggested courses of action are generated
4664                 for cases 1 and 2. Case 3 is too complex to give a useful suggestion.
4665                 """
4666
4667                 if len(slot_nodes) != 2:
4668                         # Suggestions are only implemented for
4669                         # conflicts between two packages.
4670                         return None
4671
4672                 all_conflict_atoms = self._slot_conflict_parent_atoms
4673                 matched_node = None
4674                 matched_atoms = None
4675                 unmatched_node = None
4676                 for node in slot_nodes:
4677                         parent_atoms = self._parent_atoms.get(node)
4678                         if not parent_atoms:
4679                                 # Normally, there are always parent atoms. If there are
4680                                 # none then something unexpected is happening and there's
4681                                 # currently no suggestion for this case.
4682                                 return None
4683                         conflict_atoms = all_conflict_atoms.intersection(parent_atoms)
4684                         for parent_atom in conflict_atoms:
4685                                 parent, atom = parent_atom
4686                                 if not atom.use:
4687                                         # Suggestions are currently only implemented for cases
4688                                         # in which all conflict atoms have USE deps.
4689                                         return None
4690                         if conflict_atoms:
4691                                 if matched_node is not None:
4692                                         # If conflict atoms match multiple nodes
4693                                         # then there's no suggestion.
4694                                         return None
4695                                 matched_node = node
4696                                 matched_atoms = conflict_atoms
4697                         else:
4698                                 if unmatched_node is not None:
4699                                         # Neither node is matched by conflict atoms, and
4700                                         # there is no suggestion for this case.
4701                                         return None
4702                                 unmatched_node = node
4703
4704                 if matched_node is None or unmatched_node is None:
4705                         # This shouldn't happen.
4706                         return None
4707
4708                 if unmatched_node.installed and not matched_node.installed:
4709                         return "New USE are correctly set, but --newuse wasn't" + \
4710                                 " requested, so an installed package with incorrect USE " + \
4711                                 "happened to get pulled into the dependency graph. " + \
4712                                 "In order to solve " + \
4713                                 "this, either specify the --newuse option or explicitly " + \
4714                                 " reinstall '%s'." % matched_node.slot_atom
4715
4716                 if matched_node.installed and not unmatched_node.installed:
4717                         atoms = sorted(set(atom for parent, atom in matched_atoms))
4718                         explanation = ("New USE for '%s' are incorrectly set. " + \
4719                                 "In order to solve this, adjust USE to satisfy '%s'") % \
4720                                 (matched_node.slot_atom, atoms[0])
4721                         if len(atoms) > 1:
4722                                 for atom in atoms[1:-1]:
4723                                         explanation += ", '%s'" % (atom,)
4724                                 if len(atoms) > 2:
4725                                         explanation += ","
4726                                 explanation += " and '%s'" % (atoms[-1],)
4727                         explanation += "."
4728                         return explanation
4729
4730                 return None
4731
4732         def _process_slot_conflicts(self):
4733                 """
4734                 Process slot conflict data to identify specific atoms which
4735                 lead to conflict. These atoms only match a subset of the
4736                 packages that have been pulled into a given slot.
4737                 """
4738                 for (slot_atom, root), slot_nodes \
4739                         in self._slot_collision_info.iteritems():
4740
4741                         all_parent_atoms = set()
4742                         for pkg in slot_nodes:
4743                                 parent_atoms = self._parent_atoms.get(pkg)
4744                                 if not parent_atoms:
4745                                         continue
4746                                 all_parent_atoms.update(parent_atoms)
4747
4748                         for pkg in slot_nodes:
4749                                 parent_atoms = self._parent_atoms.get(pkg)
4750                                 if parent_atoms is None:
4751                                         parent_atoms = set()
4752                                         self._parent_atoms[pkg] = parent_atoms
4753                                 for parent_atom in all_parent_atoms:
4754                                         if parent_atom in parent_atoms:
4755                                                 continue
4756                                         # Use package set for matching since it will match via
4757                                         # PROVIDE when necessary, while match_from_list does not.
4758                                         parent, atom = parent_atom
4759                                         atom_set = InternalPackageSet(
4760                                                 initial_atoms=(atom,))
4761                                         if atom_set.findAtomForPackage(pkg):
4762                                                 parent_atoms.add(parent_atom)
4763                                         else:
4764                                                 self._slot_conflict_parent_atoms.add(parent_atom)
4765
4766         def _reinstall_for_flags(self, forced_flags,
4767                 orig_use, orig_iuse, cur_use, cur_iuse):
4768                 """Return a set of flags that trigger reinstallation, or None if there
4769                 are no such flags."""
4770                 if "--newuse" in self.myopts:
4771                         flags = set(orig_iuse.symmetric_difference(
4772                                 cur_iuse).difference(forced_flags))
4773                         flags.update(orig_iuse.intersection(orig_use).symmetric_difference(
4774                                 cur_iuse.intersection(cur_use)))
4775                         if flags:
4776                                 return flags
4777                 elif "changed-use" == self.myopts.get("--reinstall"):
4778                         flags = orig_iuse.intersection(orig_use).symmetric_difference(
4779                                 cur_iuse.intersection(cur_use))
4780                         if flags:
4781                                 return flags
4782                 return None
4783
4784         def _create_graph(self, allow_unsatisfied=False):
4785                 dep_stack = self._dep_stack
4786                 while dep_stack:
4787                         self.spinner.update()
4788                         dep = dep_stack.pop()
4789                         if isinstance(dep, Package):
4790                                 if not self._add_pkg_deps(dep,
4791                                         allow_unsatisfied=allow_unsatisfied):
4792                                         return 0
4793                                 continue
4794                         if not self._add_dep(dep, allow_unsatisfied=allow_unsatisfied):
4795                                 return 0
4796                 return 1
4797
4798         def _add_dep(self, dep, allow_unsatisfied=False):
4799                 debug = "--debug" in self.myopts
4800                 buildpkgonly = "--buildpkgonly" in self.myopts
4801                 nodeps = "--nodeps" in self.myopts
4802                 empty = "empty" in self.myparams
4803                 deep = "deep" in self.myparams
4804                 update = "--update" in self.myopts and dep.depth <= 1
4805                 if dep.blocker:
4806                         if not buildpkgonly and \
4807                                 not nodeps and \
4808                                 dep.parent not in self._slot_collision_nodes:
4809                                 if dep.parent.onlydeps:
4810                                         # It's safe to ignore blockers if the
4811                                         # parent is an --onlydeps node.
4812                                         return 1
4813                                 # The blocker applies to the root where
4814                                 # the parent is or will be installed.
4815                                 blocker = Blocker(atom=dep.atom,
4816                                         eapi=dep.parent.metadata["EAPI"],
4817                                         root=dep.parent.root)
4818                                 self._blocker_parents.add(blocker, dep.parent)
4819                         return 1
4820                 dep_pkg, existing_node = self._select_package(dep.root, dep.atom,
4821                         onlydeps=dep.onlydeps)
4822                 if not dep_pkg:
4823                         if allow_unsatisfied:
4824                                 self._unsatisfied_deps.append(dep)
4825                                 return 1
4826                         self._unsatisfied_deps_for_display.append(
4827                                 ((dep.root, dep.atom), {"myparent":dep.parent}))
4828                         return 0
4829                 # In some cases, dep_check will return deps that shouldn't
4830                 # be proccessed any further, so they are identified and
4831                 # discarded here. Try to discard as few as possible since
4832                 # discarded dependencies reduce the amount of information
4833                 # available for optimization of merge order.
4834                 if dep.priority.satisfied and \
4835                         not (existing_node or empty or deep or update):
4836                         myarg = None
4837                         if dep.root == self.target_root:
4838                                 try:
4839                                         myarg = self._iter_atoms_for_pkg(dep_pkg).next()
4840                                 except StopIteration:
4841                                         pass
4842                                 except portage.exception.InvalidDependString:
4843                                         if not dep_pkg.installed:
4844                                                 # This shouldn't happen since the package
4845                                                 # should have been masked.
4846                                                 raise
4847                         if not myarg:
4848                                 self._ignored_deps.append(dep)
4849                                 return 1
4850
4851                 if not self._add_pkg(dep_pkg, dep):
4852                         return 0
4853                 return 1
4854
4855         def _add_pkg(self, pkg, dep):
4856                 myparent = None
4857                 priority = None
4858                 depth = 0
4859                 if dep is None:
4860                         dep = Dependency()
4861                 else:
4862                         myparent = dep.parent
4863                         priority = dep.priority
4864                         depth = dep.depth
4865                 if priority is None:
4866                         priority = DepPriority()
4867                 """
4868                 Fills the digraph with nodes comprised of packages to merge.
4869                 mybigkey is the package spec of the package to merge.
4870                 myparent is the package depending on mybigkey ( or None )
4871                 addme = Should we add this package to the digraph or are we just looking at it's deps?
4872                         Think --onlydeps, we need to ignore packages in that case.
4873                 #stuff to add:
4874                 #SLOT-aware emerge
4875                 #IUSE-aware emerge -> USE DEP aware depgraph
4876                 #"no downgrade" emerge
4877                 """
4878                 # Ensure that the dependencies of the same package
4879                 # are never processed more than once.
4880                 previously_added = pkg in self.digraph
4881
4882                 # select the correct /var database that we'll be checking against
4883                 vardbapi = self.trees[pkg.root]["vartree"].dbapi
4884                 pkgsettings = self.pkgsettings[pkg.root]
4885
4886                 arg_atoms = None
4887                 if True:
4888                         try:
4889                                 arg_atoms = list(self._iter_atoms_for_pkg(pkg))
4890                         except portage.exception.InvalidDependString, e:
4891                                 if not pkg.installed:
4892                                         show_invalid_depstring_notice(
4893                                                 pkg, pkg.metadata["PROVIDE"], str(e))
4894                                         return 0
4895                                 del e
4896
4897                 if not pkg.onlydeps:
4898                         if not pkg.installed and \
4899                                 "empty" not in self.myparams and \
4900                                 vardbapi.match(pkg.slot_atom):
4901                                 # Increase the priority of dependencies on packages that
4902                                 # are being rebuilt. This optimizes merge order so that
4903                                 # dependencies are rebuilt/updated as soon as possible,
4904                                 # which is needed especially when emerge is called by
4905                                 # revdep-rebuild since dependencies may be affected by ABI
4906                                 # breakage that has rendered them useless. Don't adjust
4907                                 # priority here when in "empty" mode since all packages
4908                                 # are being merged in that case.
4909                                 priority.rebuild = True
4910
4911                         existing_node = self._slot_pkg_map[pkg.root].get(pkg.slot_atom)
4912                         slot_collision = False
4913                         if existing_node:
4914                                 existing_node_matches = pkg.cpv == existing_node.cpv
4915                                 if existing_node_matches and \
4916                                         pkg != existing_node and \
4917                                         dep.atom is not None:
4918                                         # Use package set for matching since it will match via
4919                                         # PROVIDE when necessary, while match_from_list does not.
4920                                         atom_set = InternalPackageSet(initial_atoms=[dep.atom])
4921                                         if not atom_set.findAtomForPackage(existing_node):
4922                                                 existing_node_matches = False
4923                                 if existing_node_matches:
4924                                         # The existing node can be reused.
4925                                         if arg_atoms:
4926                                                 for parent_atom in arg_atoms:
4927                                                         parent, atom = parent_atom
4928                                                         self.digraph.add(existing_node, parent,
4929                                                                 priority=priority)
4930                                                         self._add_parent_atom(existing_node, parent_atom)
4931                                         # If a direct circular dependency is not an unsatisfied
4932                                         # buildtime dependency then drop it here since otherwise
4933                                         # it can skew the merge order calculation in an unwanted
4934                                         # way.
4935                                         if existing_node != myparent or \
4936                                                 (priority.buildtime and not priority.satisfied):
4937                                                 self.digraph.addnode(existing_node, myparent,
4938                                                         priority=priority)
4939                                                 if dep.atom is not None and dep.parent is not None:
4940                                                         self._add_parent_atom(existing_node,
4941                                                                 (dep.parent, dep.atom))
4942                                         return 1
4943                                 else:
4944
4945                                         # A slot collision has occurred.  Sometimes this coincides
4946                                         # with unresolvable blockers, so the slot collision will be
4947                                         # shown later if there are no unresolvable blockers.
4948                                         self._add_slot_conflict(pkg)
4949                                         slot_collision = True
4950
4951                         if slot_collision:
4952                                 # Now add this node to the graph so that self.display()
4953                                 # can show use flags and --tree portage.output.  This node is
4954                                 # only being partially added to the graph.  It must not be
4955                                 # allowed to interfere with the other nodes that have been
4956                                 # added.  Do not overwrite data for existing nodes in
4957                                 # self.mydbapi since that data will be used for blocker
4958                                 # validation.
4959                                 # Even though the graph is now invalid, continue to process
4960                                 # dependencies so that things like --fetchonly can still
4961                                 # function despite collisions.
4962                                 pass
4963                         else:
4964                                 self._slot_pkg_map[pkg.root][pkg.slot_atom] = pkg
4965                                 self.mydbapi[pkg.root].cpv_inject(pkg)
4966
4967                         if not pkg.installed:
4968                                 # Allow this package to satisfy old-style virtuals in case it
4969                                 # doesn't already. Any pre-existing providers will be preferred
4970                                 # over this one.
4971                                 try:
4972                                         pkgsettings.setinst(pkg.cpv, pkg.metadata)
4973                                         # For consistency, also update the global virtuals.
4974                                         settings = self.roots[pkg.root].settings
4975                                         settings.unlock()
4976                                         settings.setinst(pkg.cpv, pkg.metadata)
4977                                         settings.lock()
4978                                 except portage.exception.InvalidDependString, e:
4979                                         show_invalid_depstring_notice(
4980                                                 pkg, pkg.metadata["PROVIDE"], str(e))
4981                                         del e
4982                                         return 0
4983
4984                 if arg_atoms:
4985                         self._set_nodes.add(pkg)
4986
4987                 # Do this even when addme is False (--onlydeps) so that the
4988                 # parent/child relationship is always known in case
4989                 # self._show_slot_collision_notice() needs to be called later.
4990                 self.digraph.add(pkg, myparent, priority=priority)
4991                 if dep.atom is not None and dep.parent is not None:
4992                         self._add_parent_atom(pkg, (dep.parent, dep.atom))
4993
4994                 if arg_atoms:
4995                         for parent_atom in arg_atoms:
4996                                 parent, atom = parent_atom
4997                                 self.digraph.add(pkg, parent, priority=priority)
4998                                 self._add_parent_atom(pkg, parent_atom)
4999
5000                 """ This section determines whether we go deeper into dependencies or not.
5001                     We want to go deeper on a few occasions:
5002                     Installing package A, we need to make sure package A's deps are met.
5003                     emerge --deep <pkgspec>; we need to recursively check dependencies of pkgspec
5004                     If we are in --nodeps (no recursion) mode, we obviously only check 1 level of dependencies.
5005                 """
5006                 dep_stack = self._dep_stack
5007                 if "recurse" not in self.myparams:
5008                         return 1
5009                 elif pkg.installed and \
5010                         "deep" not in self.myparams:
5011                         dep_stack = self._ignored_deps
5012
5013                 self.spinner.update()
5014
5015                 if arg_atoms:
5016                         depth = 0
5017                 pkg.depth = depth
5018                 if not previously_added:
5019                         dep_stack.append(pkg)
5020                 return 1
5021
5022         def _add_parent_atom(self, pkg, parent_atom):
5023                 parent_atoms = self._parent_atoms.get(pkg)
5024                 if parent_atoms is None:
5025                         parent_atoms = set()
5026                         self._parent_atoms[pkg] = parent_atoms
5027                 parent_atoms.add(parent_atom)
5028
5029         def _add_slot_conflict(self, pkg):
5030                 self._slot_collision_nodes.add(pkg)
5031                 slot_key = (pkg.slot_atom, pkg.root)
5032                 slot_nodes = self._slot_collision_info.get(slot_key)
5033                 if slot_nodes is None:
5034                         slot_nodes = set()
5035                         slot_nodes.add(self._slot_pkg_map[pkg.root][pkg.slot_atom])
5036                         self._slot_collision_info[slot_key] = slot_nodes
5037                 slot_nodes.add(pkg)
5038
5039         def _add_pkg_deps(self, pkg, allow_unsatisfied=False):
5040
5041                 mytype = pkg.type_name
5042                 myroot = pkg.root
5043                 mykey = pkg.cpv
5044                 metadata = pkg.metadata
5045                 myuse = pkg.use.enabled
5046                 jbigkey = pkg
5047                 depth = pkg.depth + 1
5048                 removal_action = "remove" in self.myparams
5049
5050                 edepend={}
5051                 depkeys = ["DEPEND","RDEPEND","PDEPEND"]
5052                 for k in depkeys:
5053                         edepend[k] = metadata[k]
5054
5055                 if not pkg.built and \
5056                         "--buildpkgonly" in self.myopts and \
5057                         "deep" not in self.myparams and \
5058                         "empty" not in self.myparams:
5059                         edepend["RDEPEND"] = ""
5060                         edepend["PDEPEND"] = ""
5061                 bdeps_satisfied = False
5062                 
5063                 if pkg.built and not removal_action:
5064                         if self.myopts.get("--with-bdeps", "n") == "y":
5065                                 # Pull in build time deps as requested, but marked them as
5066                                 # "satisfied" since they are not strictly required. This allows
5067                                 # more freedom in the merge order calculation for solving
5068                                 # circular dependencies. Don't convert to PDEPEND since that
5069                                 # could make --with-bdeps=y less effective if it is used to
5070                                 # adjust merge order to prevent built_with_use() calls from
5071                                 # failing.
5072                                 bdeps_satisfied = True
5073                         else:
5074                                 # built packages do not have build time dependencies.
5075                                 edepend["DEPEND"] = ""
5076
5077                 if removal_action and self.myopts.get("--with-bdeps", "y") == "n":
5078                         edepend["DEPEND"] = ""
5079
5080                 deps = (
5081                         ("/", edepend["DEPEND"],
5082                                 self._priority(buildtime=True, satisfied=bdeps_satisfied)),
5083                         (myroot, edepend["RDEPEND"], self._priority(runtime=True)),
5084                         (myroot, edepend["PDEPEND"], self._priority(runtime_post=True))
5085                 )
5086
5087                 debug = "--debug" in self.myopts
5088                 strict = mytype != "installed"
5089                 try:
5090                         for dep_root, dep_string, dep_priority in deps:
5091                                 if pkg.onlydeps:
5092                                         # Decrease priority so that --buildpkgonly
5093                                         # hasallzeros() works correctly.
5094                                         dep_priority = DepPriority()
5095                                 if not dep_string:
5096                                         continue
5097                                 if debug:
5098                                         print
5099                                         print "Parent:   ", jbigkey
5100                                         print "Depstring:", dep_string
5101                                         print "Priority:", dep_priority
5102                                 vardb = self.roots[dep_root].trees["vartree"].dbapi
5103                                 try:
5104                                         selected_atoms = self._select_atoms(dep_root,
5105                                                 dep_string, myuse=myuse, parent=pkg, strict=strict)
5106                                 except portage.exception.InvalidDependString, e:
5107                                         show_invalid_depstring_notice(jbigkey, dep_string, str(e))
5108                                         return 0
5109                                 if debug:
5110                                         print "Candidates:", selected_atoms
5111
5112                                 for atom in selected_atoms:
5113                                         try:
5114
5115                                                 atom = portage.dep.Atom(atom)
5116
5117                                                 mypriority = dep_priority.copy()
5118                                                 if not atom.blocker and vardb.match(atom):
5119                                                         mypriority.satisfied = True
5120
5121                                                 if not self._add_dep(Dependency(atom=atom,
5122                                                         blocker=atom.blocker, depth=depth, parent=pkg,
5123                                                         priority=mypriority, root=dep_root),
5124                                                         allow_unsatisfied=allow_unsatisfied):
5125                                                         return 0
5126
5127                                         except portage.exception.InvalidAtom, e:
5128                                                 show_invalid_depstring_notice(
5129                                                         pkg, dep_string, str(e))
5130                                                 del e
5131                                                 if not pkg.installed:
5132                                                         return 0
5133
5134                                 if debug:
5135                                         print "Exiting...", jbigkey
5136                 except portage.exception.AmbiguousPackageName, e:
5137                         pkgs = e.args[0]
5138                         portage.writemsg("\n\n!!! An atom in the dependencies " + \
5139                                 "is not fully-qualified. Multiple matches:\n\n", noiselevel=-1)
5140                         for cpv in pkgs:
5141                                 portage.writemsg("    %s\n" % cpv, noiselevel=-1)
5142                         portage.writemsg("\n", noiselevel=-1)
5143                         if mytype == "binary":
5144                                 portage.writemsg(
5145                                         "!!! This binary package cannot be installed: '%s'\n" % \
5146                                         mykey, noiselevel=-1)
5147                         elif mytype == "ebuild":
5148                                 portdb = self.roots[myroot].trees["porttree"].dbapi
5149                                 myebuild, mylocation = portdb.findname2(mykey)
5150                                 portage.writemsg("!!! This ebuild cannot be installed: " + \
5151                                         "'%s'\n" % myebuild, noiselevel=-1)
5152                         portage.writemsg("!!! Please notify the package maintainer " + \
5153                                 "that atoms must be fully-qualified.\n", noiselevel=-1)
5154                         return 0
5155                 return 1
5156
5157         def _priority(self, **kwargs):
5158                 if "remove" in self.myparams:
5159                         priority_constructor = UnmergeDepPriority
5160                 else:
5161                         priority_constructor = DepPriority
5162                 return priority_constructor(**kwargs)
5163
5164         def _dep_expand(self, root_config, atom_without_category):
5165                 """
5166                 @param root_config: a root config instance
5167                 @type root_config: RootConfig
5168                 @param atom_without_category: an atom without a category component
5169                 @type atom_without_category: String
5170                 @rtype: list
5171                 @returns: a list of atoms containing categories (possibly empty)
5172                 """
5173                 null_cp = portage.dep_getkey(insert_category_into_atom(
5174                         atom_without_category, "null"))
5175                 cat, atom_pn = portage.catsplit(null_cp)
5176
5177                 cp_set = set()
5178                 for db, pkg_type, built, installed, db_keys in \
5179                         self._filtered_trees[root_config.root]["dbs"]:
5180                         cp_set.update(db.cp_all())
5181                 for cp in list(cp_set):
5182                         cat, pn = portage.catsplit(cp)
5183                         if pn != atom_pn:
5184                                 cp_set.discard(cp)
5185                 deps = []
5186                 for cp in cp_set:
5187                         cat, pn = portage.catsplit(cp)
5188                         deps.append(insert_category_into_atom(
5189                                 atom_without_category, cat))
5190                 return deps
5191
5192         def _have_new_virt(self, root, atom_cp):
5193                 ret = False
5194                 for db, pkg_type, built, installed, db_keys in \
5195                         self._filtered_trees[root]["dbs"]:
5196                         if db.cp_list(atom_cp):
5197                                 ret = True
5198                                 break
5199                 return ret
5200
5201         def _iter_atoms_for_pkg(self, pkg):
5202                 # TODO: add multiple $ROOT support
5203                 if pkg.root != self.target_root:
5204                         return
5205                 atom_arg_map = self._atom_arg_map
5206                 root_config = self.roots[pkg.root]
5207                 for atom in self._set_atoms.iterAtomsForPackage(pkg):
5208                         atom_cp = portage.dep_getkey(atom)
5209                         if atom_cp != pkg.cp and \
5210                                 self._have_new_virt(pkg.root, atom_cp):
5211                                 continue
5212                         visible_pkgs = root_config.visible_pkgs.match_pkgs(atom)
5213                         visible_pkgs.reverse() # descending order
5214                         higher_slot = None
5215                         for visible_pkg in visible_pkgs:
5216                                 if visible_pkg.cp != atom_cp:
5217                                         continue
5218                                 if pkg >= visible_pkg:
5219                                         # This is descending order, and we're not
5220                                         # interested in any versions <= pkg given.
5221                                         break
5222                                 if pkg.slot_atom != visible_pkg.slot_atom:
5223                                         higher_slot = visible_pkg
5224                                         break
5225                         if higher_slot is not None:
5226                                 continue
5227                         for arg in atom_arg_map[(atom, pkg.root)]:
5228                                 if isinstance(arg, PackageArg) and \
5229                                         arg.package != pkg:
5230                                         continue
5231                                 yield arg, atom
5232
5233         def select_files(self, myfiles):
5234                 """Given a list of .tbz2s, .ebuilds sets, and deps, create the
5235                 appropriate depgraph and return a favorite list."""
5236                 debug = "--debug" in self.myopts
5237                 root_config = self.roots[self.target_root]
5238                 sets = root_config.sets
5239                 getSetAtoms = root_config.setconfig.getSetAtoms
5240                 myfavorites=[]
5241                 myroot = self.target_root
5242                 dbs = self._filtered_trees[myroot]["dbs"]
5243                 vardb = self.trees[myroot]["vartree"].dbapi
5244                 real_vardb = self._trees_orig[myroot]["vartree"].dbapi
5245                 portdb = self.trees[myroot]["porttree"].dbapi
5246                 bindb = self.trees[myroot]["bintree"].dbapi
5247                 pkgsettings = self.pkgsettings[myroot]
5248                 args = []
5249                 onlydeps = "--onlydeps" in self.myopts
5250                 lookup_owners = []
5251                 for x in myfiles:
5252                         ext = os.path.splitext(x)[1]
5253                         if ext==".tbz2":
5254                                 if not os.path.exists(x):
5255                                         if os.path.exists(
5256                                                 os.path.join(pkgsettings["PKGDIR"], "All", x)):
5257                                                 x = os.path.join(pkgsettings["PKGDIR"], "All", x)
5258                                         elif os.path.exists(
5259                                                 os.path.join(pkgsettings["PKGDIR"], x)):
5260                                                 x = os.path.join(pkgsettings["PKGDIR"], x)
5261                                         else:
5262                                                 print "\n\n!!! Binary package '"+str(x)+"' does not exist."
5263                                                 print "!!! Please ensure the tbz2 exists as specified.\n"
5264                                                 return 0, myfavorites
5265                                 mytbz2=portage.xpak.tbz2(x)
5266                                 mykey=mytbz2.getelements("CATEGORY")[0]+"/"+os.path.splitext(os.path.basename(x))[0]
5267                                 if os.path.realpath(x) != \
5268                                         os.path.realpath(self.trees[myroot]["bintree"].getname(mykey)):
5269                                         print colorize("BAD", "\n*** You need to adjust PKGDIR to emerge this package.\n")
5270                                         return 0, myfavorites
5271                                 db_keys = list(bindb._aux_cache_keys)
5272                                 metadata = izip(db_keys, bindb.aux_get(mykey, db_keys))
5273                                 pkg = Package(type_name="binary", root_config=root_config,
5274                                         cpv=mykey, built=True, metadata=metadata,
5275                                         onlydeps=onlydeps)
5276                                 self._pkg_cache[pkg] = pkg
5277                                 args.append(PackageArg(arg=x, package=pkg,
5278                                         root_config=root_config))
5279                         elif ext==".ebuild":
5280                                 ebuild_path = portage.util.normalize_path(os.path.abspath(x))
5281                                 pkgdir = os.path.dirname(ebuild_path)
5282                                 tree_root = os.path.dirname(os.path.dirname(pkgdir))
5283                                 cp = pkgdir[len(tree_root)+1:]
5284                                 e = portage.exception.PackageNotFound(
5285                                         ("%s is not in a valid portage tree " + \
5286                                         "hierarchy or does not exist") % x)
5287                                 if not portage.isvalidatom(cp):
5288                                         raise e
5289                                 cat = portage.catsplit(cp)[0]
5290                                 mykey = cat + "/" + os.path.basename(ebuild_path[:-7])
5291                                 if not portage.isvalidatom("="+mykey):
5292                                         raise e
5293                                 ebuild_path = portdb.findname(mykey)
5294                                 if ebuild_path:
5295                                         if ebuild_path != os.path.join(os.path.realpath(tree_root),
5296                                                 cp, os.path.basename(ebuild_path)):
5297                                                 print colorize("BAD", "\n*** You need to adjust PORTDIR or PORTDIR_OVERLAY to emerge this package.\n")
5298                                                 return 0, myfavorites
5299                                         if mykey not in portdb.xmatch(
5300                                                 "match-visible", portage.dep_getkey(mykey)):
5301                                                 print colorize("BAD", "\n*** You are emerging a masked package. It is MUCH better to use")
5302                                                 print colorize("BAD", "*** /etc/portage/package.* to accomplish this. See portage(5) man")
5303                                                 print colorize("BAD", "*** page for details.")
5304                                                 countdown(int(self.settings["EMERGE_WARNING_DELAY"]),
5305                                                         "Continuing...")
5306                                 else:
5307                                         raise portage.exception.PackageNotFound(
5308                                                 "%s is not in a valid portage tree hierarchy or does not exist" % x)
5309                                 db_keys = list(portdb._aux_cache_keys)
5310                                 metadata = izip(db_keys, portdb.aux_get(mykey, db_keys))
5311                                 pkg = Package(type_name="ebuild", root_config=root_config,
5312                                         cpv=mykey, metadata=metadata, onlydeps=onlydeps)
5313                                 pkgsettings.setcpv(pkg)
5314                                 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
5315                                 self._pkg_cache[pkg] = pkg
5316                                 args.append(PackageArg(arg=x, package=pkg,
5317                                         root_config=root_config))
5318                         elif x.startswith(os.path.sep):
5319                                 if not x.startswith(myroot):
5320                                         portage.writemsg(("\n\n!!! '%s' does not start with" + \
5321                                                 " $ROOT.\n") % x, noiselevel=-1)
5322                                         return 0, []
5323                                 # Queue these up since it's most efficient to handle
5324                                 # multiple files in a single iter_owners() call.
5325                                 lookup_owners.append(x)
5326                         else:
5327                                 if x in ("system", "world"):
5328                                         x = SETPREFIX + x
5329                                 if x.startswith(SETPREFIX):
5330                                         s = x[len(SETPREFIX):]
5331                                         if s not in sets:
5332                                                 raise portage.exception.PackageSetNotFound(s)
5333                                         if s in self._sets:
5334                                                 continue
5335                                         # Recursively expand sets so that containment tests in
5336                                         # self._get_parent_sets() properly match atoms in nested
5337                                         # sets (like if world contains system).
5338                                         expanded_set = InternalPackageSet(
5339                                                 initial_atoms=getSetAtoms(s))
5340                                         self._sets[s] = expanded_set
5341                                         args.append(SetArg(arg=x, set=expanded_set,
5342                                                 root_config=root_config))
5343                                         continue
5344                                 if not is_valid_package_atom(x):
5345                                         portage.writemsg("\n\n!!! '%s' is not a valid package atom.\n" % x,
5346                                                 noiselevel=-1)
5347                                         portage.writemsg("!!! Please check ebuild(5) for full details.\n")
5348                                         portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
5349                                         return (0,[])
5350                                 # Don't expand categories or old-style virtuals here unless
5351                                 # necessary. Expansion of old-style virtuals here causes at
5352                                 # least the following problems:
5353                                 #   1) It's more difficult to determine which set(s) an atom
5354                                 #      came from, if any.
5355                                 #   2) It takes away freedom from the resolver to choose other
5356                                 #      possible expansions when necessary.
5357                                 if "/" in x:
5358                                         args.append(AtomArg(arg=x, atom=x,
5359                                                 root_config=root_config))
5360                                         continue
5361                                 expanded_atoms = self._dep_expand(root_config, x)
5362                                 installed_cp_set = set()
5363                                 for atom in expanded_atoms:
5364                                         atom_cp = portage.dep_getkey(atom)
5365                                         if vardb.cp_list(atom_cp):
5366                                                 installed_cp_set.add(atom_cp)
5367                                 if len(expanded_atoms) > 1 and len(installed_cp_set) == 1:
5368                                         installed_cp = iter(installed_cp_set).next()
5369                                         expanded_atoms = [atom for atom in expanded_atoms \
5370                                                 if portage.dep_getkey(atom) == installed_cp]
5371
5372                                 if len(expanded_atoms) > 1:
5373                                         print
5374                                         print
5375                                         ambiguous_package_name(x, expanded_atoms, root_config,
5376                                                 self.spinner, self.myopts)
5377                                         return False, myfavorites
5378                                 if expanded_atoms:
5379                                         atom = expanded_atoms[0]
5380                                 else:
5381                                         null_atom = insert_category_into_atom(x, "null")
5382                                         null_cp = portage.dep_getkey(null_atom)
5383                                         cat, atom_pn = portage.catsplit(null_cp)
5384                                         virts_p = root_config.settings.get_virts_p().get(atom_pn)
5385                                         if virts_p:
5386                                                 # Allow the depgraph to choose which virtual.
5387                                                 atom = insert_category_into_atom(x, "virtual")
5388                                         else:
5389                                                 atom = insert_category_into_atom(x, "null")
5390
5391                                 args.append(AtomArg(arg=x, atom=atom,
5392                                         root_config=root_config))
5393
5394                 if lookup_owners:
5395                         relative_paths = []
5396                         search_for_multiple = False
5397                         if len(lookup_owners) > 1:
5398                                 search_for_multiple = True
5399
5400                         for x in lookup_owners:
5401                                 if not search_for_multiple and os.path.isdir(x):
5402                                         search_for_multiple = True
5403                                 relative_paths.append(x[len(myroot):])
5404
5405                         owners = set()
5406                         for pkg, relative_path in \
5407                                 real_vardb._owners.iter_owners(relative_paths):
5408                                 owners.add(pkg.mycpv)
5409                                 if not search_for_multiple:
5410                                         break
5411
5412                         if not owners:
5413                                 portage.writemsg(("\n\n!!! '%s' is not claimed " + \
5414                                         "by any package.\n") % lookup_owners[0], noiselevel=-1)
5415                                 return 0, []
5416
5417                         for cpv in owners:
5418                                 slot = vardb.aux_get(cpv, ["SLOT"])[0]
5419                                 if not slot:
5420                                         # portage now masks packages with missing slot, but it's
5421                                         # possible that one was installed by an older version
5422                                         atom = portage.cpv_getkey(cpv)
5423                                 else:
5424                                         atom = "%s:%s" % (portage.cpv_getkey(cpv), slot)
5425                                 args.append(AtomArg(arg=atom, atom=atom,
5426                                         root_config=root_config))
5427
5428                 if "--update" in self.myopts:
5429                         # In some cases, the greedy slots behavior can pull in a slot that
5430                         # the user would want to uninstall due to it being blocked by a
5431                         # newer version in a different slot. Therefore, it's necessary to
5432                         # detect and discard the any that should be uninstalled. Each time
5433                         # that arguments are updated, package selections are repeated in
5434                         # ensure consistency with the current arguments:
5435                         #
5436                         #  1) Initialize args
5437                         #  2) Select packages and generate initial greedy atoms
5438                         #  3) Update args with greedy atoms
5439                         #  4) Select packages and generate greedy atoms again, while
5440                         #     accounting for any blockers between selected packages
5441                         #  5) Update args with revised greedy atoms
5442
5443                         self._set_args(args)
5444                         greedy_args = []
5445                         for arg in args:
5446                                 greedy_args.append(arg)
5447                                 if not isinstance(arg, AtomArg):
5448                                         continue
5449                                 for atom in self._greedy_slots(arg.root_config, arg.atom):
5450                                         greedy_args.append(
5451                                                 AtomArg(arg=arg.arg, atom=atom,
5452                                                         root_config=arg.root_config))
5453
5454                         self._set_args(greedy_args)
5455                         del greedy_args
5456
5457                         # Revise greedy atoms, accounting for any blockers
5458                         # between selected packages.
5459                         revised_greedy_args = []
5460                         for arg in args:
5461                                 revised_greedy_args.append(arg)
5462                                 if not isinstance(arg, AtomArg):
5463                                         continue
5464                                 for atom in self._greedy_slots(arg.root_config, arg.atom,
5465                                         blocker_lookahead=True):
5466                                         revised_greedy_args.append(
5467                                                 AtomArg(arg=arg.arg, atom=atom,
5468                                                         root_config=arg.root_config))
5469                         args = revised_greedy_args
5470                         del revised_greedy_args
5471
5472                 self._set_args(args)
5473
5474                 myfavorites = set(myfavorites)
5475                 for arg in args:
5476                         if isinstance(arg, (AtomArg, PackageArg)):
5477                                 myfavorites.add(arg.atom)
5478                         elif isinstance(arg, SetArg):
5479                                 myfavorites.add(arg.arg)
5480                 myfavorites = list(myfavorites)
5481
5482                 pprovideddict = pkgsettings.pprovideddict
5483                 if debug:
5484                         portage.writemsg("\n", noiselevel=-1)
5485                 # Order needs to be preserved since a feature of --nodeps
5486                 # is to allow the user to force a specific merge order.
5487                 args.reverse()
5488                 while args:
5489                         arg = args.pop()
5490                         for atom in arg.set:
5491                                 self.spinner.update()
5492                                 dep = Dependency(atom=atom, onlydeps=onlydeps,
5493                                         root=myroot, parent=arg)
5494                                 atom_cp = portage.dep_getkey(atom)
5495                                 try:
5496                                         pprovided = pprovideddict.get(portage.dep_getkey(atom))
5497                                         if pprovided and portage.match_from_list(atom, pprovided):
5498                                                 # A provided package has been specified on the command line.
5499                                                 self._pprovided_args.append((arg, atom))
5500                                                 continue
5501                                         if isinstance(arg, PackageArg):
5502                                                 if not self._add_pkg(arg.package, dep) or \
5503                                                         not self._create_graph():
5504                                                         sys.stderr.write(("\n\n!!! Problem resolving " + \
5505                                                                 "dependencies for %s\n") % arg.arg)
5506                                                         return 0, myfavorites
5507                                                 continue
5508                                         if debug:
5509                                                 portage.writemsg("      Arg: %s\n     Atom: %s\n" % \
5510                                                         (arg, atom), noiselevel=-1)
5511                                         pkg, existing_node = self._select_package(
5512                                                 myroot, atom, onlydeps=onlydeps)
5513                                         if not pkg:
5514                                                 if not (isinstance(arg, SetArg) and \
5515                                                         arg.name in ("system", "world")):
5516                                                         self._unsatisfied_deps_for_display.append(
5517                                                                 ((myroot, atom), {}))
5518                                                         return 0, myfavorites
5519                                                 self._missing_args.append((arg, atom))
5520                                                 continue
5521                                         if atom_cp != pkg.cp:
5522                                                 # For old-style virtuals, we need to repeat the
5523                                                 # package.provided check against the selected package.
5524                                                 expanded_atom = atom.replace(atom_cp, pkg.cp)
5525                                                 pprovided = pprovideddict.get(pkg.cp)
5526                                                 if pprovided and \
5527                                                         portage.match_from_list(expanded_atom, pprovided):
5528                                                         # A provided package has been
5529                                                         # specified on the command line.
5530                                                         self._pprovided_args.append((arg, atom))
5531                                                         continue
5532                                         if pkg.installed and "selective" not in self.myparams:
5533                                                 self._unsatisfied_deps_for_display.append(
5534                                                         ((myroot, atom), {}))
5535                                                 # Previous behavior was to bail out in this case, but
5536                                                 # since the dep is satisfied by the installed package,
5537                                                 # it's more friendly to continue building the graph
5538                                                 # and just show a warning message. Therefore, only bail
5539                                                 # out here if the atom is not from either the system or
5540                                                 # world set.
5541                                                 if not (isinstance(arg, SetArg) and \
5542                                                         arg.name in ("system", "world")):
5543                                                         return 0, myfavorites
5544
5545                                         # Add the selected package to the graph as soon as possible
5546                                         # so that later dep_check() calls can use it as feedback
5547                                         # for making more consistent atom selections.
5548                                         if not self._add_pkg(pkg, dep):
5549                                                 if isinstance(arg, SetArg):
5550                                                         sys.stderr.write(("\n\n!!! Problem resolving " + \
5551                                                                 "dependencies for %s from %s\n") % \
5552                                                                 (atom, arg.arg))
5553                                                 else:
5554                                                         sys.stderr.write(("\n\n!!! Problem resolving " + \
5555                                                                 "dependencies for %s\n") % atom)
5556                                                 return 0, myfavorites
5557
5558                                 except portage.exception.MissingSignature, e:
5559                                         portage.writemsg("\n\n!!! A missing gpg signature is preventing portage from calculating the\n")
5560                                         portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
5561                                         portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
5562                                         portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
5563                                         portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
5564                                         return 0, myfavorites
5565                                 except portage.exception.InvalidSignature, e:
5566                                         portage.writemsg("\n\n!!! An invalid gpg signature is preventing portage from calculating the\n")
5567                                         portage.writemsg("!!! required dependencies. This is a security feature enabled by the admin\n")
5568                                         portage.writemsg("!!! to aid in the detection of malicious intent.\n\n")
5569                                         portage.writemsg("!!! THIS IS A POSSIBLE INDICATION OF TAMPERED FILES -- CHECK CAREFULLY.\n")
5570                                         portage.writemsg("!!! Affected file: %s\n" % (e), noiselevel=-1)
5571                                         return 0, myfavorites
5572                                 except SystemExit, e:
5573                                         raise # Needed else can't exit
5574                                 except Exception, e:
5575                                         print >> sys.stderr, "\n\n!!! Problem in '%s' dependencies." % atom
5576                                         print >> sys.stderr, "!!!", str(e), getattr(e, "__module__", None)
5577                                         raise
5578
5579                 # Now that the root packages have been added to the graph,
5580                 # process the dependencies.
5581                 if not self._create_graph():
5582                         return 0, myfavorites
5583
5584                 missing=0
5585                 if "--usepkgonly" in self.myopts:
5586                         for xs in self.digraph.all_nodes():
5587                                 if not isinstance(xs, Package):
5588                                         continue
5589                                 if len(xs) >= 4 and xs[0] != "binary" and xs[3] == "merge":
5590                                         if missing == 0:
5591                                                 print
5592                                         missing += 1
5593                                         print "Missing binary for:",xs[2]
5594
5595                 try:
5596                         self.altlist()
5597                 except self._unknown_internal_error:
5598                         return False, myfavorites
5599
5600                 # We're true here unless we are missing binaries.
5601                 return (not missing,myfavorites)
5602
5603         def _set_args(self, args):
5604                 """
5605                 Create the "args" package set from atoms and packages given as
5606                 arguments. This method can be called multiple times if necessary.
5607                 The package selection cache is automatically invalidated, since
5608                 arguments influence package selections.
5609                 """
5610                 args_set = self._sets["args"]
5611                 args_set.clear()
5612                 for arg in args:
5613                         if not isinstance(arg, (AtomArg, PackageArg)):
5614                                 continue
5615                         atom = arg.atom
5616                         if atom in args_set:
5617                                 continue
5618                         args_set.add(atom)
5619
5620                 self._set_atoms.clear()
5621                 self._set_atoms.update(chain(*self._sets.itervalues()))
5622                 atom_arg_map = self._atom_arg_map
5623                 atom_arg_map.clear()
5624                 for arg in args:
5625                         for atom in arg.set:
5626                                 atom_key = (atom, arg.root_config.root)
5627                                 refs = atom_arg_map.get(atom_key)
5628                                 if refs is None:
5629                                         refs = []
5630                                         atom_arg_map[atom_key] = refs
5631                                         if arg not in refs:
5632                                                 refs.append(arg)
5633
5634                 # Invalidate the package selection cache, since
5635                 # arguments influence package selections.
5636                 self._highest_pkg_cache.clear()
5637
5638         def _greedy_slots(self, root_config, atom, blocker_lookahead=False):
5639                 """
5640                 Return a list of slot atoms corresponding to installed slots that
5641                 differ from the slot of the highest visible match. Slot atoms that
5642                 would trigger a blocker conflict are automatically discarded,
5643                 potentially allowing automatic uninstallation of older slots when
5644                 appropriate.
5645                 """
5646                 highest_pkg, in_graph = self._select_package(root_config.root, atom)
5647                 if highest_pkg is None:
5648                         return []
5649                 vardb = root_config.trees["vartree"].dbapi
5650                 slots = set()
5651                 for cpv in vardb.match(atom):
5652                         # don't mix new virtuals with old virtuals
5653                         if portage.cpv_getkey(cpv) == highest_pkg.cp:
5654                                 slots.add(vardb.aux_get(cpv, ["SLOT"])[0])
5655
5656                 slots.add(highest_pkg.metadata["SLOT"])
5657                 if len(slots) == 1:
5658                         return []
5659                 greedy_pkgs = []
5660                 slots.remove(highest_pkg.metadata["SLOT"])
5661                 while slots:
5662                         slot = slots.pop()
5663                         slot_atom = portage.dep.Atom("%s:%s" % (highest_pkg.cp, slot))
5664                         pkg, in_graph = self._select_package(root_config.root, slot_atom)
5665                         if pkg is not None and pkg < highest_pkg:
5666                                 greedy_pkgs.append(pkg)
5667                 if not greedy_pkgs:
5668                         return []
5669                 if not blocker_lookahead:
5670                         return [pkg.slot_atom for pkg in greedy_pkgs]
5671
5672                 blockers = {}
5673                 blocker_dep_keys = ["DEPEND", "PDEPEND", "RDEPEND"]
5674                 for pkg in greedy_pkgs + [highest_pkg]:
5675                         dep_str = " ".join(pkg.metadata[k] for k in blocker_dep_keys)
5676                         try:
5677                                 atoms = self._select_atoms(
5678                                         pkg.root, dep_str, pkg.use.enabled,
5679                                         parent=pkg, strict=True)
5680                         except portage.exception.InvalidDependString:
5681                                 continue
5682                         blocker_atoms = (x for x in atoms if x.blocker)
5683                         blockers[pkg] = InternalPackageSet(initial_atoms=blocker_atoms)
5684
5685                 if highest_pkg not in blockers:
5686                         return []
5687
5688                 # filter packages with invalid deps
5689                 greedy_pkgs = [pkg for pkg in greedy_pkgs if pkg in blockers]
5690
5691                 # filter packages that conflict with highest_pkg
5692                 greedy_pkgs = [pkg for pkg in greedy_pkgs if not \
5693                         (blockers[highest_pkg].findAtomForPackage(pkg) or \
5694                         blockers[pkg].findAtomForPackage(highest_pkg))]
5695
5696                 if not greedy_pkgs:
5697                         return []
5698
5699                 # If two packages conflict, discard the lower version.
5700                 discard_pkgs = set()
5701                 greedy_pkgs.sort(reverse=True)
5702                 for pkg1 in greedy_pkgs:
5703                         if pkg1 in discard_pkgs:
5704                                 continue
5705                         for pkg2 in greedy_pkgs:
5706                                 if pkg2 in discard_pkgs:
5707                                         continue
5708                                 if pkg1 is pkg2:
5709                                         continue
5710                                 if blockers[pkg1].findAtomForPackage(pkg2) or \
5711                                         blockers[pkg2].findAtomForPackage(pkg1):
5712                                         # pkg1 > pkg2
5713                                         discard_pkgs.add(pkg2)
5714
5715                 return [pkg.slot_atom for pkg in greedy_pkgs \
5716                         if pkg not in discard_pkgs]
5717
5718         def _select_atoms_from_graph(self, *pargs, **kwargs):
5719                 """
5720                 Prefer atoms matching packages that have already been
5721                 added to the graph or those that are installed and have
5722                 not been scheduled for replacement.
5723                 """
5724                 kwargs["trees"] = self._graph_trees
5725                 return self._select_atoms_highest_available(*pargs, **kwargs)
5726
5727         def _select_atoms_highest_available(self, root, depstring,
5728                 myuse=None, parent=None, strict=True, trees=None):
5729                 """This will raise InvalidDependString if necessary. If trees is
5730                 None then self._filtered_trees is used."""
5731                 pkgsettings = self.pkgsettings[root]
5732                 if trees is None:
5733                         trees = self._filtered_trees
5734                 if True:
5735                         try:
5736                                 if parent is not None:
5737                                         trees[root]["parent"] = parent
5738                                 if not strict:
5739                                         portage.dep._dep_check_strict = False
5740                                 mycheck = portage.dep_check(depstring, None,
5741                                         pkgsettings, myuse=myuse,
5742                                         myroot=root, trees=trees)
5743                         finally:
5744                                 if parent is not None:
5745                                         trees[root].pop("parent")
5746                                 portage.dep._dep_check_strict = True
5747                         if not mycheck[0]:
5748                                 raise portage.exception.InvalidDependString(mycheck[1])
5749                         selected_atoms = mycheck[1]
5750                 return selected_atoms
5751
5752         def _show_unsatisfied_dep(self, root, atom, myparent=None, arg=None):
5753                 atom = portage.dep.Atom(atom)
5754                 atom_set = InternalPackageSet(initial_atoms=(atom,))
5755                 atom_without_use = atom
5756                 if atom.use:
5757                         atom_without_use = portage.dep.remove_slot(atom)
5758                         if atom.slot:
5759                                 atom_without_use += ":" + atom.slot
5760                         atom_without_use = portage.dep.Atom(atom_without_use)
5761                 xinfo = '"%s"' % atom
5762                 if arg:
5763                         xinfo='"%s"' % arg
5764                 # Discard null/ from failed cpv_expand category expansion.
5765                 xinfo = xinfo.replace("null/", "")
5766                 masked_packages = []
5767                 missing_use = []
5768                 missing_licenses = []
5769                 have_eapi_mask = False
5770                 pkgsettings = self.pkgsettings[root]
5771                 implicit_iuse = pkgsettings._get_implicit_iuse()
5772                 root_config = self.roots[root]
5773                 portdb = self.roots[root].trees["porttree"].dbapi
5774                 dbs = self._filtered_trees[root]["dbs"]
5775                 for db, pkg_type, built, installed, db_keys in dbs:
5776                         if installed:
5777                                 continue
5778                         match = db.match
5779                         if hasattr(db, "xmatch"):
5780                                 cpv_list = db.xmatch("match-all", atom_without_use)
5781                         else:
5782                                 cpv_list = db.match(atom_without_use)
5783                         # descending order
5784                         cpv_list.reverse()
5785                         for cpv in cpv_list:
5786                                 metadata, mreasons  = get_mask_info(root_config, cpv,
5787                                         pkgsettings, db, pkg_type, built, installed, db_keys)
5788                                 if metadata is not None:
5789                                         pkg = Package(built=built, cpv=cpv,
5790                                                 installed=installed, metadata=metadata,
5791                                                 root_config=root_config)
5792                                         if pkg.cp != atom.cp:
5793                                                 # A cpv can be returned from dbapi.match() as an
5794                                                 # old-style virtual match even in cases when the
5795                                                 # package does not actually PROVIDE the virtual.
5796                                                 # Filter out any such false matches here.
5797                                                 if not atom_set.findAtomForPackage(pkg):
5798                                                         continue
5799                                         if atom.use and not mreasons:
5800                                                 missing_use.append(pkg)
5801                                                 continue
5802                                 masked_packages.append(
5803                                         (root_config, pkgsettings, cpv, metadata, mreasons))
5804
5805                 missing_use_reasons = []
5806                 missing_iuse_reasons = []
5807                 for pkg in missing_use:
5808                         use = pkg.use.enabled
5809                         iuse = implicit_iuse.union(re.escape(x) for x in pkg.iuse.all)
5810                         iuse_re = re.compile("^(%s)$" % "|".join(iuse))
5811                         missing_iuse = []
5812                         for x in atom.use.required:
5813                                 if iuse_re.match(x) is None:
5814                                         missing_iuse.append(x)
5815                         mreasons = []
5816                         if missing_iuse:
5817                                 mreasons.append("Missing IUSE: %s" % " ".join(missing_iuse))
5818                                 missing_iuse_reasons.append((pkg, mreasons))
5819                         else:
5820                                 need_enable = sorted(atom.use.enabled.difference(use))
5821                                 need_disable = sorted(atom.use.disabled.intersection(use))
5822                                 if need_enable or need_disable:
5823                                         changes = []
5824                                         changes.extend(colorize("red", "+" + x) \
5825                                                 for x in need_enable)
5826                                         changes.extend(colorize("blue", "-" + x) \
5827                                                 for x in need_disable)
5828                                         mreasons.append("Change USE: %s" % " ".join(changes))
5829                                         missing_use_reasons.append((pkg, mreasons))
5830
5831                 if missing_iuse_reasons and not missing_use_reasons:
5832                         missing_use_reasons = missing_iuse_reasons
5833                 elif missing_use_reasons:
5834                         # Only show the latest version.
5835                         del missing_use_reasons[1:]
5836
5837                 if missing_use_reasons:
5838                         print "\nemerge: there are no ebuilds built with USE flags to satisfy "+green(xinfo)+"."
5839                         print "!!! One of the following packages is required to complete your request:"
5840                         for pkg, mreasons in missing_use_reasons:
5841                                 print "- "+pkg.cpv+" ("+", ".join(mreasons)+")"
5842
5843                 elif masked_packages:
5844                         print "\n!!! " + \
5845                                 colorize("BAD", "All ebuilds that could satisfy ") + \
5846                                 colorize("INFORM", xinfo) + \
5847                                 colorize("BAD", " have been masked.")
5848                         print "!!! One of the following masked packages is required to complete your request:"
5849                         have_eapi_mask = show_masked_packages(masked_packages)
5850                         if have_eapi_mask:
5851                                 print
5852                                 msg = ("The current version of portage supports " + \
5853                                         "EAPI '%s'. You must upgrade to a newer version" + \
5854                                         " of portage before EAPI masked packages can" + \
5855                                         " be installed.") % portage.const.EAPI
5856                                 from textwrap import wrap
5857                                 for line in wrap(msg, 75):
5858                                         print line
5859                         print
5860                         show_mask_docs()
5861                 else:
5862                         print "\nemerge: there are no ebuilds to satisfy "+green(xinfo)+"."
5863
5864                 # Show parent nodes and the argument that pulled them in.
5865                 traversed_nodes = set()
5866                 node = myparent
5867                 msg = []
5868                 while node is not None:
5869                         traversed_nodes.add(node)
5870                         msg.append('(dependency required by "%s" [%s])' % \
5871                                 (colorize('INFORM', str(node.cpv)), node.type_name))
5872                         # When traversing to parents, prefer arguments over packages
5873                         # since arguments are root nodes. Never traverse the same
5874                         # package twice, in order to prevent an infinite loop.
5875                         selected_parent = None
5876                         for parent in self.digraph.parent_nodes(node):
5877                                 if isinstance(parent, DependencyArg):
5878                                         msg.append('(dependency required by "%s" [argument])' % \
5879                                                 (colorize('INFORM', str(parent))))
5880                                         selected_parent = None
5881                                         break
5882                                 if parent not in traversed_nodes:
5883                                         selected_parent = parent
5884                         node = selected_parent
5885                 for line in msg:
5886                         print line
5887
5888                 print
5889
5890         def _select_pkg_highest_available(self, root, atom, onlydeps=False):
5891                 cache_key = (root, atom, onlydeps)
5892                 ret = self._highest_pkg_cache.get(cache_key)
5893                 if ret is not None:
5894                         pkg, existing = ret
5895                         if pkg and not existing:
5896                                 existing = self._slot_pkg_map[root].get(pkg.slot_atom)
5897                                 if existing and existing == pkg:
5898                                         # Update the cache to reflect that the
5899                                         # package has been added to the graph.
5900                                         ret = pkg, pkg
5901                                         self._highest_pkg_cache[cache_key] = ret
5902                         return ret
5903                 ret = self._select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps)
5904                 self._highest_pkg_cache[cache_key] = ret
5905                 pkg, existing = ret
5906                 if pkg is not None:
5907                         settings = pkg.root_config.settings
5908                         if visible(settings, pkg) and not (pkg.installed and \
5909                                 settings._getMissingKeywords(pkg.cpv, pkg.metadata)):
5910                                 pkg.root_config.visible_pkgs.cpv_inject(pkg)
5911                 return ret
5912
5913         def _select_pkg_highest_available_imp(self, root, atom, onlydeps=False):
5914                 root_config = self.roots[root]
5915                 pkgsettings = self.pkgsettings[root]
5916                 dbs = self._filtered_trees[root]["dbs"]
5917                 vardb = self.roots[root].trees["vartree"].dbapi
5918                 portdb = self.roots[root].trees["porttree"].dbapi
5919                 # List of acceptable packages, ordered by type preference.
5920                 matched_packages = []
5921                 highest_version = None
5922                 if not isinstance(atom, portage.dep.Atom):
5923                         atom = portage.dep.Atom(atom)
5924                 atom_cp = atom.cp
5925                 atom_set = InternalPackageSet(initial_atoms=(atom,))
5926                 existing_node = None
5927                 myeb = None
5928                 usepkgonly = "--usepkgonly" in self.myopts
5929                 empty = "empty" in self.myparams
5930                 selective = "selective" in self.myparams
5931                 reinstall = False
5932                 noreplace = "--noreplace" in self.myopts
5933                 # Behavior of the "selective" parameter depends on
5934                 # whether or not a package matches an argument atom.
5935                 # If an installed package provides an old-style
5936                 # virtual that is no longer provided by an available
5937                 # package, the installed package may match an argument
5938                 # atom even though none of the available packages do.
5939                 # Therefore, "selective" logic does not consider
5940                 # whether or not an installed package matches an
5941                 # argument atom. It only considers whether or not
5942                 # available packages match argument atoms, which is
5943                 # represented by the found_available_arg flag.
5944                 found_available_arg = False
5945                 for find_existing_node in True, False:
5946                         if existing_node:
5947                                 break
5948                         for db, pkg_type, built, installed, db_keys in dbs:
5949                                 if existing_node:
5950                                         break
5951                                 if installed and not find_existing_node:
5952                                         want_reinstall = reinstall or empty or \
5953                                                 (found_available_arg and not selective)
5954                                         if want_reinstall and matched_packages:
5955                                                 continue
5956                                 if hasattr(db, "xmatch"):
5957                                         cpv_list = db.xmatch("match-all", atom)
5958                                 else:
5959                                         cpv_list = db.match(atom)
5960
5961                                 # USE=multislot can make an installed package appear as if
5962                                 # it doesn't satisfy a slot dependency. Rebuilding the ebuild
5963                                 # won't do any good as long as USE=multislot is enabled since
5964                                 # the newly built package still won't have the expected slot.
5965                                 # Therefore, assume that such SLOT dependencies are already
5966                                 # satisfied rather than forcing a rebuild.
5967                                 if installed and not cpv_list and atom.slot:
5968                                         for cpv in db.match(atom.cp):
5969                                                 slot_available = False
5970                                                 for other_db, other_type, other_built, \
5971                                                         other_installed, other_keys in dbs:
5972                                                         try:
5973                                                                 if atom.slot == \
5974                                                                         other_db.aux_get(cpv, ["SLOT"])[0]:
5975                                                                         slot_available = True
5976                                                                         break
5977                                                         except KeyError:
5978                                                                 pass
5979                                                 if not slot_available:
5980                                                         continue
5981                                                 inst_pkg = self._pkg(cpv, "installed",
5982                                                         root_config, installed=installed)
5983                                                 # Remove the slot from the atom and verify that
5984                                                 # the package matches the resulting atom.
5985                                                 atom_without_slot = portage.dep.remove_slot(atom)
5986                                                 if atom.use:
5987                                                         atom_without_slot += str(atom.use)
5988                                                 atom_without_slot = portage.dep.Atom(atom_without_slot)
5989                                                 if portage.match_from_list(
5990                                                         atom_without_slot, [inst_pkg]):
5991                                                         cpv_list = [inst_pkg.cpv]
5992                                                 break
5993
5994                                 if not cpv_list:
5995                                         continue
5996                                 pkg_status = "merge"
5997                                 if installed or onlydeps:
5998                                         pkg_status = "nomerge"
5999                                 # descending order
6000                                 cpv_list.reverse()
6001                                 for cpv in cpv_list:
6002                                         # Make --noreplace take precedence over --newuse.
6003                                         if not installed and noreplace and \
6004                                                 cpv in vardb.match(atom):
6005                                                 # If the installed version is masked, it may
6006                                                 # be necessary to look at lower versions,
6007                                                 # in case there is a visible downgrade.
6008                                                 continue
6009                                         reinstall_for_flags = None
6010                                         cache_key = (pkg_type, root, cpv, pkg_status)
6011                                         calculated_use = True
6012                                         pkg = self._pkg_cache.get(cache_key)
6013                                         if pkg is None:
6014                                                 calculated_use = False
6015                                                 try:
6016                                                         metadata = izip(db_keys, db.aux_get(cpv, db_keys))
6017                                                 except KeyError:
6018                                                         continue
6019                                                 pkg = Package(built=built, cpv=cpv,
6020                                                         installed=installed, metadata=metadata,
6021                                                         onlydeps=onlydeps, root_config=root_config,
6022                                                         type_name=pkg_type)
6023                                                 metadata = pkg.metadata
6024                                                 if not built and ("?" in metadata["LICENSE"] or \
6025                                                         "?" in metadata["PROVIDE"]):
6026                                                         # This is avoided whenever possible because
6027                                                         # it's expensive. It only needs to be done here
6028                                                         # if it has an effect on visibility.
6029                                                         pkgsettings.setcpv(pkg)
6030                                                         metadata["USE"] = pkgsettings["PORTAGE_USE"]
6031                                                         calculated_use = True
6032                                                 self._pkg_cache[pkg] = pkg
6033
6034                                         if not installed or (built and matched_packages):
6035                                                 # Only enforce visibility on installed packages
6036                                                 # if there is at least one other visible package
6037                                                 # available. By filtering installed masked packages
6038                                                 # here, packages that have been masked since they
6039                                                 # were installed can be automatically downgraded
6040                                                 # to an unmasked version.
6041                                                 try:
6042                                                         if not visible(pkgsettings, pkg):
6043                                                                 continue
6044                                                 except portage.exception.InvalidDependString:
6045                                                         if not installed:
6046                                                                 continue
6047
6048                                                 # Enable upgrade or downgrade to a version
6049                                                 # with visible KEYWORDS when the installed
6050                                                 # version is masked by KEYWORDS, but never
6051                                                 # reinstall the same exact version only due
6052                                                 # to a KEYWORDS mask.
6053                                                 if built and matched_packages:
6054
6055                                                         different_version = None
6056                                                         for avail_pkg in matched_packages:
6057                                                                 if not portage.dep.cpvequal(
6058                                                                         pkg.cpv, avail_pkg.cpv):
6059                                                                         different_version = avail_pkg
6060                                                                         break
6061                                                         if different_version is not None:
6062
6063                                                                 if installed and \
6064                                                                         pkgsettings._getMissingKeywords(
6065                                                                         pkg.cpv, pkg.metadata):
6066                                                                         continue
6067
6068                                                                 # If the ebuild no longer exists or it's
6069                                                                 # keywords have been dropped, reject built
6070                                                                 # instances (installed or binary).
6071                                                                 # If --usepkgonly is enabled, assume that
6072                                                                 # the ebuild status should be ignored.
6073                                                                 if not usepkgonly:
6074                                                                         try:
6075                                                                                 pkg_eb = self._pkg(
6076                                                                                         pkg.cpv, "ebuild", root_config)
6077                                                                         except portage.exception.PackageNotFound:
6078                                                                                 continue
6079                                                                         else:
6080                                                                                 if not visible(pkgsettings, pkg_eb):
6081                                                                                         continue
6082
6083                                         if not pkg.built and not calculated_use:
6084                                                 # This is avoided whenever possible because
6085                                                 # it's expensive.
6086                                                 pkgsettings.setcpv(pkg)
6087                                                 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
6088
6089                                         if pkg.cp != atom.cp:
6090                                                 # A cpv can be returned from dbapi.match() as an
6091                                                 # old-style virtual match even in cases when the
6092                                                 # package does not actually PROVIDE the virtual.
6093                                                 # Filter out any such false matches here.
6094                                                 if not atom_set.findAtomForPackage(pkg):
6095                                                         continue
6096
6097                                         myarg = None
6098                                         if root == self.target_root:
6099                                                 try:
6100                                                         # Ebuild USE must have been calculated prior
6101                                                         # to this point, in case atoms have USE deps.
6102                                                         myarg = self._iter_atoms_for_pkg(pkg).next()
6103                                                 except StopIteration:
6104                                                         pass
6105                                                 except portage.exception.InvalidDependString:
6106                                                         if not installed:
6107                                                                 # masked by corruption
6108                                                                 continue
6109                                         if not installed and myarg:
6110                                                 found_available_arg = True
6111
6112                                         if atom.use and not pkg.built:
6113                                                 use = pkg.use.enabled
6114                                                 if atom.use.enabled.difference(use):
6115                                                         continue
6116                                                 if atom.use.disabled.intersection(use):
6117                                                         continue
6118                                         if pkg.cp == atom_cp:
6119                                                 if highest_version is None:
6120                                                         highest_version = pkg
6121                                                 elif pkg > highest_version:
6122                                                         highest_version = pkg
6123                                         # At this point, we've found the highest visible
6124                                         # match from the current repo. Any lower versions
6125                                         # from this repo are ignored, so this so the loop
6126                                         # will always end with a break statement below
6127                                         # this point.
6128                                         if find_existing_node:
6129                                                 e_pkg = self._slot_pkg_map[root].get(pkg.slot_atom)
6130                                                 if not e_pkg:
6131                                                         break
6132                                                 if portage.dep.match_from_list(atom, [e_pkg]):
6133                                                         if highest_version and \
6134                                                                 e_pkg.cp == atom_cp and \
6135                                                                 e_pkg < highest_version and \
6136                                                                 e_pkg.slot_atom != highest_version.slot_atom:
6137                                                                 # There is a higher version available in a
6138                                                                 # different slot, so this existing node is
6139                                                                 # irrelevant.
6140                                                                 pass
6141                                                         else:
6142                                                                 matched_packages.append(e_pkg)
6143                                                                 existing_node = e_pkg
6144                                                 break
6145                                         # Compare built package to current config and
6146                                         # reject the built package if necessary.
6147                                         if built and not installed and \
6148                                                 ("--newuse" in self.myopts or \
6149                                                 "--reinstall" in self.myopts):
6150                                                 iuses = pkg.iuse.all
6151                                                 old_use = pkg.use.enabled
6152                                                 if myeb:
6153                                                         pkgsettings.setcpv(myeb)
6154                                                 else:
6155                                                         pkgsettings.setcpv(pkg)
6156                                                 now_use = pkgsettings["PORTAGE_USE"].split()
6157                                                 forced_flags = set()
6158                                                 forced_flags.update(pkgsettings.useforce)
6159                                                 forced_flags.update(pkgsettings.usemask)
6160                                                 cur_iuse = iuses
6161                                                 if myeb and not usepkgonly:
6162                                                         cur_iuse = myeb.iuse.all
6163                                                 if self._reinstall_for_flags(forced_flags,
6164                                                         old_use, iuses,
6165                                                         now_use, cur_iuse):
6166                                                         break
6167                                         # Compare current config to installed package
6168                                         # and do not reinstall if possible.
6169                                         if not installed and \
6170                                                 ("--newuse" in self.myopts or \
6171                                                 "--reinstall" in self.myopts) and \
6172                                                 cpv in vardb.match(atom):
6173                                                 pkgsettings.setcpv(pkg)
6174                                                 forced_flags = set()
6175                                                 forced_flags.update(pkgsettings.useforce)
6176                                                 forced_flags.update(pkgsettings.usemask)
6177                                                 old_use = vardb.aux_get(cpv, ["USE"])[0].split()
6178                                                 old_iuse = set(filter_iuse_defaults(
6179                                                         vardb.aux_get(cpv, ["IUSE"])[0].split()))
6180                                                 cur_use = pkgsettings["PORTAGE_USE"].split()
6181                                                 cur_iuse = pkg.iuse.all
6182                                                 reinstall_for_flags = \
6183                                                         self._reinstall_for_flags(
6184                                                         forced_flags, old_use, old_iuse,
6185                                                         cur_use, cur_iuse)
6186                                                 if reinstall_for_flags:
6187                                                         reinstall = True
6188                                         if not built:
6189                                                 myeb = pkg
6190                                         matched_packages.append(pkg)
6191                                         if reinstall_for_flags:
6192                                                 self._reinstall_nodes[pkg] = \
6193                                                         reinstall_for_flags
6194                                         break
6195
6196                 if not matched_packages:
6197                         return None, None
6198
6199                 if "--debug" in self.myopts:
6200                         for pkg in matched_packages:
6201                                 portage.writemsg("%s %s\n" % \
6202                                         ((pkg.type_name + ":").rjust(10), pkg.cpv), noiselevel=-1)
6203
6204                 # Filter out any old-style virtual matches if they are
6205                 # mixed with new-style virtual matches.
6206                 cp = portage.dep_getkey(atom)
6207                 if len(matched_packages) > 1 and \
6208                         "virtual" == portage.catsplit(cp)[0]:
6209                         for pkg in matched_packages:
6210                                 if pkg.cp != cp:
6211                                         continue
6212                                 # Got a new-style virtual, so filter
6213                                 # out any old-style virtuals.
6214                                 matched_packages = [pkg for pkg in matched_packages \
6215                                         if pkg.cp == cp]
6216                                 break
6217
6218                 if len(matched_packages) > 1:
6219                         bestmatch = portage.best(
6220                                 [pkg.cpv for pkg in matched_packages])
6221                         matched_packages = [pkg for pkg in matched_packages \
6222                                 if portage.dep.cpvequal(pkg.cpv, bestmatch)]
6223
6224                 # ordered by type preference ("ebuild" type is the last resort)
6225                 return  matched_packages[-1], existing_node
6226
6227         def _select_pkg_from_graph(self, root, atom, onlydeps=False):
6228                 """
6229                 Select packages that have already been added to the graph or
6230                 those that are installed and have not been scheduled for
6231                 replacement.
6232                 """
6233                 graph_db = self._graph_trees[root]["porttree"].dbapi
6234                 matches = graph_db.match(atom)
6235                 if not matches:
6236                         return None, None
6237                 cpv = matches[-1] # highest match
6238                 slot_atom = "%s:%s" % (portage.cpv_getkey(cpv),
6239                         graph_db.aux_get(cpv, ["SLOT"])[0])
6240                 e_pkg = self._slot_pkg_map[root].get(slot_atom)
6241                 if e_pkg:
6242                         return e_pkg, e_pkg
6243                 # Since this cpv exists in the graph_db,
6244                 # we must have a cached Package instance.
6245                 cache_key = ("installed", root, cpv, "nomerge")
6246                 return (self._pkg_cache[cache_key], None)
6247
6248         def _complete_graph(self):
6249                 """
6250                 Add any deep dependencies of required sets (args, system, world) that
6251                 have not been pulled into the graph yet. This ensures that the graph
6252                 is consistent such that initially satisfied deep dependencies are not
6253                 broken in the new graph. Initially unsatisfied dependencies are
6254                 irrelevant since we only want to avoid breaking dependencies that are
6255                 intially satisfied.
6256
6257                 Since this method can consume enough time to disturb users, it is
6258                 currently only enabled by the --complete-graph option.
6259                 """
6260                 if "--buildpkgonly" in self.myopts or \
6261                         "recurse" not in self.myparams:
6262                         return 1
6263
6264                 if "complete" not in self.myparams:
6265                         # Skip this to avoid consuming enough time to disturb users.
6266                         return 1
6267
6268                 # Put the depgraph into a mode that causes it to only
6269                 # select packages that have already been added to the
6270                 # graph or those that are installed and have not been
6271                 # scheduled for replacement. Also, toggle the "deep"
6272                 # parameter so that all dependencies are traversed and
6273                 # accounted for.
6274                 self._select_atoms = self._select_atoms_from_graph
6275                 self._select_package = self._select_pkg_from_graph
6276                 already_deep = "deep" in self.myparams
6277                 if not already_deep:
6278                         self.myparams.add("deep")
6279
6280                 for root in self.roots:
6281                         required_set_names = self._required_set_names.copy()
6282                         if root == self.target_root and \
6283                                 (already_deep or "empty" in self.myparams):
6284                                 required_set_names.difference_update(self._sets)
6285                         if not required_set_names and not self._ignored_deps:
6286                                 continue
6287                         root_config = self.roots[root]
6288                         setconfig = root_config.setconfig
6289                         args = []
6290                         # Reuse existing SetArg instances when available.
6291                         for arg in self.digraph.root_nodes():
6292                                 if not isinstance(arg, SetArg):
6293                                         continue
6294                                 if arg.root_config != root_config:
6295                                         continue
6296                                 if arg.name in required_set_names:
6297                                         args.append(arg)
6298                                         required_set_names.remove(arg.name)
6299                         # Create new SetArg instances only when necessary.
6300                         for s in required_set_names:
6301                                 expanded_set = InternalPackageSet(
6302                                         initial_atoms=setconfig.getSetAtoms(s))
6303                                 atom = SETPREFIX + s
6304                                 args.append(SetArg(arg=atom, set=expanded_set,
6305                                         root_config=root_config))
6306                         vardb = root_config.trees["vartree"].dbapi
6307                         for arg in args:
6308                                 for atom in arg.set:
6309                                         self._dep_stack.append(
6310                                                 Dependency(atom=atom, root=root, parent=arg))
6311                         if self._ignored_deps:
6312                                 self._dep_stack.extend(self._ignored_deps)
6313                                 self._ignored_deps = []
6314                         if not self._create_graph(allow_unsatisfied=True):
6315                                 return 0
6316                         # Check the unsatisfied deps to see if any initially satisfied deps
6317                         # will become unsatisfied due to an upgrade. Initially unsatisfied
6318                         # deps are irrelevant since we only want to avoid breaking deps
6319                         # that are initially satisfied.
6320                         while self._unsatisfied_deps:
6321                                 dep = self._unsatisfied_deps.pop()
6322                                 matches = vardb.match_pkgs(dep.atom)
6323                                 if not matches:
6324                                         self._initially_unsatisfied_deps.append(dep)
6325                                         continue
6326                                 # An scheduled installation broke a deep dependency.
6327                                 # Add the installed package to the graph so that it
6328                                 # will be appropriately reported as a slot collision
6329                                 # (possibly solvable via backtracking).
6330                                 pkg = matches[-1] # highest match
6331                                 if not self._add_pkg(pkg, dep):
6332                                         return 0
6333                                 if not self._create_graph(allow_unsatisfied=True):
6334                                         return 0
6335                 return 1
6336
6337         def _pkg(self, cpv, type_name, root_config, installed=False):
6338                 """
6339                 Get a package instance from the cache, or create a new
6340                 one if necessary. Raises KeyError from aux_get if it
6341                 failures for some reason (package does not exist or is
6342                 corrupt).
6343                 """
6344                 operation = "merge"
6345                 if installed:
6346                         operation = "nomerge"
6347                 pkg = self._pkg_cache.get(
6348                         (type_name, root_config.root, cpv, operation))
6349                 if pkg is None:
6350                         tree_type = self.pkg_tree_map[type_name]
6351                         db = root_config.trees[tree_type].dbapi
6352                         db_keys = list(self._trees_orig[root_config.root][
6353                                 tree_type].dbapi._aux_cache_keys)
6354                         try:
6355                                 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
6356                         except KeyError:
6357                                 raise portage.exception.PackageNotFound(cpv)
6358                         pkg = Package(cpv=cpv, metadata=metadata,
6359                                 root_config=root_config, installed=installed)
6360                         if type_name == "ebuild":
6361                                 settings = self.pkgsettings[root_config.root]
6362                                 settings.setcpv(pkg)
6363                                 pkg.metadata["USE"] = settings["PORTAGE_USE"]
6364                         self._pkg_cache[pkg] = pkg
6365                 return pkg
6366
6367         def validate_blockers(self):
6368                 """Remove any blockers from the digraph that do not match any of the
6369                 packages within the graph.  If necessary, create hard deps to ensure
6370                 correct merge order such that mutually blocking packages are never
6371                 installed simultaneously."""
6372
6373                 if "--buildpkgonly" in self.myopts or \
6374                         "--nodeps" in self.myopts:
6375                         return True
6376
6377                 #if "deep" in self.myparams:
6378                 if True:
6379                         # Pull in blockers from all installed packages that haven't already
6380                         # been pulled into the depgraph.  This is not enabled by default
6381                         # due to the performance penalty that is incurred by all the
6382                         # additional dep_check calls that are required.
6383
6384                         dep_keys = ["DEPEND","RDEPEND","PDEPEND"]
6385                         for myroot in self.trees:
6386                                 vardb = self.trees[myroot]["vartree"].dbapi
6387                                 portdb = self.trees[myroot]["porttree"].dbapi
6388                                 pkgsettings = self.pkgsettings[myroot]
6389                                 final_db = self.mydbapi[myroot]
6390
6391                                 blocker_cache = BlockerCache(myroot, vardb)
6392                                 stale_cache = set(blocker_cache)
6393                                 for pkg in vardb:
6394                                         cpv = pkg.cpv
6395                                         stale_cache.discard(cpv)
6396                                         pkg_in_graph = self.digraph.contains(pkg)
6397
6398                                         # Check for masked installed packages. Only warn about
6399                                         # packages that are in the graph in order to avoid warning
6400                                         # about those that will be automatically uninstalled during
6401                                         # the merge process or by --depclean.
6402                                         if pkg in final_db:
6403                                                 if pkg_in_graph and not visible(pkgsettings, pkg):
6404                                                         self._masked_installed.add(pkg)
6405
6406                                         blocker_atoms = None
6407                                         blockers = None
6408                                         if pkg_in_graph:
6409                                                 blockers = []
6410                                                 try:
6411                                                         blockers.extend(
6412                                                                 self._blocker_parents.child_nodes(pkg))
6413                                                 except KeyError:
6414                                                         pass
6415                                                 try:
6416                                                         blockers.extend(
6417                                                                 self._irrelevant_blockers.child_nodes(pkg))
6418                                                 except KeyError:
6419                                                         pass
6420                                         if blockers is not None:
6421                                                 blockers = set(str(blocker.atom) \
6422                                                         for blocker in blockers)
6423
6424                                         # If this node has any blockers, create a "nomerge"
6425                                         # node for it so that they can be enforced.
6426                                         self.spinner.update()
6427                                         blocker_data = blocker_cache.get(cpv)
6428                                         if blocker_data is not None and \
6429                                                 blocker_data.counter != long(pkg.metadata["COUNTER"]):
6430                                                 blocker_data = None
6431
6432                                         # If blocker data from the graph is available, use
6433                                         # it to validate the cache and update the cache if
6434                                         # it seems invalid.
6435                                         if blocker_data is not None and \
6436                                                 blockers is not None:
6437                                                 if not blockers.symmetric_difference(
6438                                                         blocker_data.atoms):
6439                                                         continue
6440                                                 blocker_data = None
6441
6442                                         if blocker_data is None and \
6443                                                 blockers is not None:
6444                                                 # Re-use the blockers from the graph.
6445                                                 blocker_atoms = sorted(blockers)
6446                                                 counter = long(pkg.metadata["COUNTER"])
6447                                                 blocker_data = \
6448                                                         blocker_cache.BlockerData(counter, blocker_atoms)
6449                                                 blocker_cache[pkg.cpv] = blocker_data
6450                                                 continue
6451
6452                                         if blocker_data:
6453                                                 blocker_atoms = blocker_data.atoms
6454                                         else:
6455                                                 # Use aux_get() to trigger FakeVartree global
6456                                                 # updates on *DEPEND when appropriate.
6457                                                 depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
6458                                                 # It is crucial to pass in final_db here in order to
6459                                                 # optimize dep_check calls by eliminating atoms via
6460                                                 # dep_wordreduce and dep_eval calls.
6461                                                 try:
6462                                                         portage.dep._dep_check_strict = False
6463                                                         try:
6464                                                                 success, atoms = portage.dep_check(depstr,
6465                                                                         final_db, pkgsettings, myuse=pkg.use.enabled,
6466                                                                         trees=self._graph_trees, myroot=myroot)
6467                                                         except Exception, e:
6468                                                                 if isinstance(e, SystemExit):
6469                                                                         raise
6470                                                                 # This is helpful, for example, if a ValueError
6471                                                                 # is thrown from cpv_expand due to multiple
6472                                                                 # matches (this can happen if an atom lacks a
6473                                                                 # category).
6474                                                                 show_invalid_depstring_notice(
6475                                                                         pkg, depstr, str(e))
6476                                                                 del e
6477                                                                 raise
6478                                                 finally:
6479                                                         portage.dep._dep_check_strict = True
6480                                                 if not success:
6481                                                         replacement_pkg = final_db.match_pkgs(pkg.slot_atom)
6482                                                         if replacement_pkg and \
6483                                                                 replacement_pkg[0].operation == "merge":
6484                                                                 # This package is being replaced anyway, so
6485                                                                 # ignore invalid dependencies so as not to
6486                                                                 # annoy the user too much (otherwise they'd be
6487                                                                 # forced to manually unmerge it first).
6488                                                                 continue
6489                                                         show_invalid_depstring_notice(pkg, depstr, atoms)
6490                                                         return False
6491                                                 blocker_atoms = [myatom for myatom in atoms \
6492                                                         if myatom.startswith("!")]
6493                                                 blocker_atoms.sort()
6494                                                 counter = long(pkg.metadata["COUNTER"])
6495                                                 blocker_cache[cpv] = \
6496                                                         blocker_cache.BlockerData(counter, blocker_atoms)
6497                                         if blocker_atoms:
6498                                                 try:
6499                                                         for atom in blocker_atoms:
6500                                                                 blocker = Blocker(atom=portage.dep.Atom(atom),
6501                                                                         eapi=pkg.metadata["EAPI"], root=myroot)
6502                                                                 self._blocker_parents.add(blocker, pkg)
6503                                                 except portage.exception.InvalidAtom, e:
6504                                                         depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
6505                                                         show_invalid_depstring_notice(
6506                                                                 pkg, depstr, "Invalid Atom: %s" % (e,))
6507                                                         return False
6508                                 for cpv in stale_cache:
6509                                         del blocker_cache[cpv]
6510                                 blocker_cache.flush()
6511                                 del blocker_cache
6512
6513                 # Discard any "uninstall" tasks scheduled by previous calls
6514                 # to this method, since those tasks may not make sense given
6515                 # the current graph state.
6516                 previous_uninstall_tasks = self._blocker_uninstalls.leaf_nodes()
6517                 if previous_uninstall_tasks:
6518                         self._blocker_uninstalls = digraph()
6519                         self.digraph.difference_update(previous_uninstall_tasks)
6520
6521                 for blocker in self._blocker_parents.leaf_nodes():
6522                         self.spinner.update()
6523                         root_config = self.roots[blocker.root]
6524                         virtuals = root_config.settings.getvirtuals()
6525                         myroot = blocker.root
6526                         initial_db = self.trees[myroot]["vartree"].dbapi
6527                         final_db = self.mydbapi[myroot]
6528                         
6529                         provider_virtual = False
6530                         if blocker.cp in virtuals and \
6531                                 not self._have_new_virt(blocker.root, blocker.cp):
6532                                 provider_virtual = True
6533
6534                         if provider_virtual:
6535                                 atoms = []
6536                                 for provider_entry in virtuals[blocker.cp]:
6537                                         provider_cp = \
6538                                                 portage.dep_getkey(provider_entry)
6539                                         atoms.append(blocker.atom.replace(
6540                                                 blocker.cp, provider_cp))
6541                         else:
6542                                 atoms = [blocker.atom]
6543
6544                         blocked_initial = []
6545                         for atom in atoms:
6546                                 blocked_initial.extend(initial_db.match_pkgs(atom))
6547
6548                         blocked_final = []
6549                         for atom in atoms:
6550                                 blocked_final.extend(final_db.match_pkgs(atom))
6551
6552                         if not blocked_initial and not blocked_final:
6553                                 parent_pkgs = self._blocker_parents.parent_nodes(blocker)
6554                                 self._blocker_parents.remove(blocker)
6555                                 # Discard any parents that don't have any more blockers.
6556                                 for pkg in parent_pkgs:
6557                                         self._irrelevant_blockers.add(blocker, pkg)
6558                                         if not self._blocker_parents.child_nodes(pkg):
6559                                                 self._blocker_parents.remove(pkg)
6560                                 continue
6561                         for parent in self._blocker_parents.parent_nodes(blocker):
6562                                 unresolved_blocks = False
6563                                 depends_on_order = set()
6564                                 for pkg in blocked_initial:
6565                                         if pkg.slot_atom == parent.slot_atom:
6566                                                 # TODO: Support blocks within slots in cases where it
6567                                                 # might make sense.  For example, a new version might
6568                                                 # require that the old version be uninstalled at build
6569                                                 # time.
6570                                                 continue
6571                                         if parent.installed:
6572                                                 # Two currently installed packages conflict with
6573                                                 # eachother. Ignore this case since the damage
6574                                                 # is already done and this would be likely to
6575                                                 # confuse users if displayed like a normal blocker.
6576                                                 continue
6577
6578                                         self._blocked_pkgs.add(pkg, blocker)
6579
6580                                         if parent.operation == "merge":
6581                                                 # Maybe the blocked package can be replaced or simply
6582                                                 # unmerged to resolve this block.
6583                                                 depends_on_order.add((pkg, parent))
6584                                                 continue
6585                                         # None of the above blocker resolutions techniques apply,
6586                                         # so apparently this one is unresolvable.
6587                                         unresolved_blocks = True
6588                                 for pkg in blocked_final:
6589                                         if pkg.slot_atom == parent.slot_atom:
6590                                                 # TODO: Support blocks within slots.
6591                                                 continue
6592                                         if parent.operation == "nomerge" and \
6593                                                 pkg.operation == "nomerge":
6594                                                 # This blocker will be handled the next time that a
6595                                                 # merge of either package is triggered.
6596                                                 continue
6597
6598                                         self._blocked_pkgs.add(pkg, blocker)
6599
6600                                         # Maybe the blocking package can be
6601                                         # unmerged to resolve this block.
6602                                         if parent.operation == "merge" and pkg.installed:
6603                                                 depends_on_order.add((pkg, parent))
6604                                                 continue
6605                                         elif parent.operation == "nomerge":
6606                                                 depends_on_order.add((parent, pkg))
6607                                                 continue
6608                                         # None of the above blocker resolutions techniques apply,
6609                                         # so apparently this one is unresolvable.
6610                                         unresolved_blocks = True
6611
6612                                 # Make sure we don't unmerge any package that have been pulled
6613                                 # into the graph.
6614                                 if not unresolved_blocks and depends_on_order:
6615                                         for inst_pkg, inst_task in depends_on_order:
6616                                                 if self.digraph.contains(inst_pkg) and \
6617                                                         self.digraph.parent_nodes(inst_pkg):
6618                                                         unresolved_blocks = True
6619                                                         break
6620
6621                                 if not unresolved_blocks and depends_on_order:
6622                                         for inst_pkg, inst_task in depends_on_order:
6623                                                 uninst_task = Package(built=inst_pkg.built,
6624                                                         cpv=inst_pkg.cpv, installed=inst_pkg.installed,
6625                                                         metadata=inst_pkg.metadata,
6626                                                         operation="uninstall",
6627                                                         root_config=inst_pkg.root_config,
6628                                                         type_name=inst_pkg.type_name)
6629                                                 self._pkg_cache[uninst_task] = uninst_task
6630                                                 # Enforce correct merge order with a hard dep.
6631                                                 self.digraph.addnode(uninst_task, inst_task,
6632                                                         priority=BlockerDepPriority.instance)
6633                                                 # Count references to this blocker so that it can be
6634                                                 # invalidated after nodes referencing it have been
6635                                                 # merged.
6636                                                 self._blocker_uninstalls.addnode(uninst_task, blocker)
6637                                 if not unresolved_blocks and not depends_on_order:
6638                                         self._irrelevant_blockers.add(blocker, parent)
6639                                         self._blocker_parents.remove_edge(blocker, parent)
6640                                         if not self._blocker_parents.parent_nodes(blocker):
6641                                                 self._blocker_parents.remove(blocker)
6642                                         if not self._blocker_parents.child_nodes(parent):
6643                                                 self._blocker_parents.remove(parent)
6644                                 if unresolved_blocks:
6645                                         self._unsolvable_blockers.add(blocker, parent)
6646
6647                 return True
6648
6649         def _accept_blocker_conflicts(self):
6650                 acceptable = False
6651                 for x in ("--buildpkgonly", "--fetchonly",
6652                         "--fetch-all-uri", "--nodeps"):
6653                         if x in self.myopts:
6654                                 acceptable = True
6655                                 break
6656                 return acceptable
6657
6658         def _merge_order_bias(self, mygraph):
6659                 """Order nodes from highest to lowest overall reference count for
6660                 optimal leaf node selection."""
6661                 node_info = {}
6662                 for node in mygraph.order:
6663                         node_info[node] = len(mygraph.parent_nodes(node))
6664                 def cmp_merge_preference(node1, node2):
6665                         return node_info[node2] - node_info[node1]
6666                 mygraph.order.sort(cmp_merge_preference)
6667
6668         def altlist(self, reversed=False):
6669
6670                 while self._serialized_tasks_cache is None:
6671                         self._resolve_conflicts()
6672                         try:
6673                                 self._serialized_tasks_cache, self._scheduler_graph = \
6674                                         self._serialize_tasks()
6675                         except self._serialize_tasks_retry:
6676                                 pass
6677
6678                 retlist = self._serialized_tasks_cache[:]
6679                 if reversed:
6680                         retlist.reverse()
6681                 return retlist
6682
6683         def schedulerGraph(self):
6684                 """
6685                 The scheduler graph is identical to the normal one except that
6686                 uninstall edges are reversed in specific cases that require
6687                 conflicting packages to be temporarily installed simultaneously.
6688                 This is intended for use by the Scheduler in it's parallelization
6689                 logic. It ensures that temporary simultaneous installation of
6690                 conflicting packages is avoided when appropriate (especially for
6691                 !!atom blockers), but allowed in specific cases that require it.
6692
6693                 Note that this method calls break_refs() which alters the state of
6694                 internal Package instances such that this depgraph instance should
6695                 not be used to perform any more calculations.
6696                 """
6697                 if self._scheduler_graph is None:
6698                         self.altlist()
6699                 self.break_refs(self._scheduler_graph.order)
6700                 return self._scheduler_graph
6701
6702         def break_refs(self, nodes):
6703                 """
6704                 Take a mergelist like that returned from self.altlist() and
6705                 break any references that lead back to the depgraph. This is
6706                 useful if you want to hold references to packages without
6707                 also holding the depgraph on the heap.
6708                 """
6709                 for node in nodes:
6710                         if hasattr(node, "root_config"):
6711                                 # The FakeVartree references the _package_cache which
6712                                 # references the depgraph. So that Package instances don't
6713                                 # hold the depgraph and FakeVartree on the heap, replace
6714                                 # the RootConfig that references the FakeVartree with the
6715                                 # original RootConfig instance which references the actual
6716                                 # vartree.
6717                                 node.root_config = \
6718                                         self._trees_orig[node.root_config.root]["root_config"]
6719
6720         def _resolve_conflicts(self):
6721                 if not self._complete_graph():
6722                         raise self._unknown_internal_error()
6723
6724                 if not self.validate_blockers():
6725                         raise self._unknown_internal_error()
6726
6727                 if self._slot_collision_info:
6728                         self._process_slot_conflicts()
6729
6730         def _serialize_tasks(self):
6731
6732                 if "--debug" in self.myopts:
6733                         writemsg("\ndigraph:\n\n", noiselevel=-1)
6734                         self.digraph.debug_print()
6735                         writemsg("\n", noiselevel=-1)
6736
6737                 scheduler_graph = self.digraph.copy()
6738                 mygraph=self.digraph.copy()
6739                 # Prune "nomerge" root nodes if nothing depends on them, since
6740                 # otherwise they slow down merge order calculation. Don't remove
6741                 # non-root nodes since they help optimize merge order in some cases
6742                 # such as revdep-rebuild.
6743                 removed_nodes = set()
6744                 while True:
6745                         for node in mygraph.root_nodes():
6746                                 if not isinstance(node, Package) or \
6747                                         node.installed or node.onlydeps:
6748                                         removed_nodes.add(node)
6749                         if removed_nodes:
6750                                 self.spinner.update()
6751                                 mygraph.difference_update(removed_nodes)
6752                         if not removed_nodes:
6753                                 break
6754                         removed_nodes.clear()
6755                 self._merge_order_bias(mygraph)
6756                 def cmp_circular_bias(n1, n2):
6757                         """
6758                         RDEPEND is stronger than PDEPEND and this function
6759                         measures such a strength bias within a circular
6760                         dependency relationship.
6761                         """
6762                         n1_n2_medium = n2 in mygraph.child_nodes(n1,
6763                                 ignore_priority=DepPriority.MEDIUM_SOFT)
6764                         n2_n1_medium = n1 in mygraph.child_nodes(n2,
6765                                 ignore_priority=DepPriority.MEDIUM_SOFT)
6766                         if n1_n2_medium == n2_n1_medium:
6767                                 return 0
6768                         elif n1_n2_medium:
6769                                 return 1
6770                         return -1
6771                 myblocker_uninstalls = self._blocker_uninstalls.copy()
6772                 retlist=[]
6773                 # Contains uninstall tasks that have been scheduled to
6774                 # occur after overlapping blockers have been installed.
6775                 scheduled_uninstalls = set()
6776                 # Contains any Uninstall tasks that have been ignored
6777                 # in order to avoid the circular deps code path. These
6778                 # correspond to blocker conflicts that could not be
6779                 # resolved.
6780                 ignored_uninstall_tasks = set()
6781                 have_uninstall_task = False
6782                 complete = "complete" in self.myparams
6783                 asap_nodes = []
6784
6785                 def get_nodes(**kwargs):
6786                         """
6787                         Returns leaf nodes excluding Uninstall instances
6788                         since those should be executed as late as possible.
6789                         """
6790                         return [node for node in mygraph.leaf_nodes(**kwargs) \
6791                                 if isinstance(node, Package) and \
6792                                         (node.operation != "uninstall" or \
6793                                         node in scheduled_uninstalls)]
6794
6795                 # sys-apps/portage needs special treatment if ROOT="/"
6796                 running_root = self._running_root.root
6797                 from portage.const import PORTAGE_PACKAGE_ATOM
6798                 runtime_deps = InternalPackageSet(
6799                         initial_atoms=[PORTAGE_PACKAGE_ATOM])
6800                 running_portage = self.trees[running_root]["vartree"].dbapi.match_pkgs(
6801                         PORTAGE_PACKAGE_ATOM)
6802                 replacement_portage = self.mydbapi[running_root].match_pkgs(
6803                         PORTAGE_PACKAGE_ATOM)
6804
6805                 if running_portage:
6806                         running_portage = running_portage[0]
6807                 else:
6808                         running_portage = None
6809
6810                 if replacement_portage:
6811                         replacement_portage = replacement_portage[0]
6812                 else:
6813                         replacement_portage = None
6814
6815                 if replacement_portage == running_portage:
6816                         replacement_portage = None
6817
6818                 if replacement_portage is not None:
6819                         # update from running_portage to replacement_portage asap
6820                         asap_nodes.append(replacement_portage)
6821
6822                 if running_portage is not None:
6823                         try:
6824                                 portage_rdepend = self._select_atoms_highest_available(
6825                                         running_root, running_portage.metadata["RDEPEND"],
6826                                         myuse=running_portage.use.enabled,
6827                                         parent=running_portage, strict=False)
6828                         except portage.exception.InvalidDependString, e:
6829                                 portage.writemsg("!!! Invalid RDEPEND in " + \
6830                                         "'%svar/db/pkg/%s/RDEPEND': %s\n" % \
6831                                         (running_root, running_portage.cpv, e), noiselevel=-1)
6832                                 del e
6833                                 portage_rdepend = []
6834                         runtime_deps.update(atom for atom in portage_rdepend \
6835                                 if not atom.startswith("!"))
6836
6837                 ignore_priority_soft_range = [None]
6838                 ignore_priority_soft_range.extend(
6839                         xrange(DepPriority.MIN, DepPriority.MEDIUM_SOFT + 1))
6840                 tree_mode = "--tree" in self.myopts
6841                 # Tracks whether or not the current iteration should prefer asap_nodes
6842                 # if available.  This is set to False when the previous iteration
6843                 # failed to select any nodes.  It is reset whenever nodes are
6844                 # successfully selected.
6845                 prefer_asap = True
6846
6847                 # By default, try to avoid selecting root nodes whenever possible. This
6848                 # helps ensure that the maximimum possible number of soft dependencies
6849                 # have been removed from the graph before their parent nodes have
6850                 # selected. This is especially important when those dependencies are
6851                 # going to be rebuilt by revdep-rebuild or `emerge -e system` after the
6852                 # CHOST has been changed (like when building a stage3 from a stage2).
6853                 accept_root_node = False
6854
6855                 # State of prefer_asap and accept_root_node flags for successive
6856                 # iterations that loosen the criteria for node selection.
6857                 #
6858                 # iteration   prefer_asap   accept_root_node
6859                 # 1           True          False
6860                 # 2           False         False
6861                 # 3           False         True
6862                 #
6863                 # If no nodes are selected on the 3rd iteration, it is due to
6864                 # unresolved blockers or circular dependencies.
6865
6866                 while not mygraph.empty():
6867                         self.spinner.update()
6868                         selected_nodes = None
6869                         ignore_priority = None
6870                         if prefer_asap and asap_nodes:
6871                                 """ASAP nodes are merged before their soft deps."""
6872                                 asap_nodes = [node for node in asap_nodes \
6873                                         if mygraph.contains(node)]
6874                                 for node in asap_nodes:
6875                                         if not mygraph.child_nodes(node,
6876                                                 ignore_priority=DepPriority.SOFT):
6877                                                 selected_nodes = [node]
6878                                                 asap_nodes.remove(node)
6879                                                 break
6880                         if not selected_nodes and \
6881                                 not (prefer_asap and asap_nodes):
6882                                 for ignore_priority in ignore_priority_soft_range:
6883                                         nodes = get_nodes(ignore_priority=ignore_priority)
6884                                         if nodes:
6885                                                 break
6886                                 if nodes:
6887                                         if ignore_priority is None and not tree_mode:
6888                                                 # Greedily pop all of these nodes since no relationship
6889                                                 # has been ignored.  This optimization destroys --tree
6890                                                 # output, so it's disabled in reversed mode. If there
6891                                                 # is a mix of merge and uninstall nodes, save the
6892                                                 # uninstall nodes from later since sometimes a merge
6893                                                 # node will render an install node unnecessary, and
6894                                                 # we want to avoid doing a separate uninstall task in
6895                                                 # that case.
6896                                                 merge_nodes = [node for node in nodes \
6897                                                         if node.operation == "merge"]
6898                                                 if merge_nodes:
6899                                                         selected_nodes = merge_nodes
6900                                                 else:
6901                                                         selected_nodes = nodes
6902                                         else:
6903                                                 # For optimal merge order:
6904                                                 #  * Only pop one node.
6905                                                 #  * Removing a root node (node without a parent)
6906                                                 #    will not produce a leaf node, so avoid it.
6907                                                 for node in nodes:
6908                                                         if mygraph.parent_nodes(node):
6909                                                                 # found a non-root node
6910                                                                 selected_nodes = [node]
6911                                                                 break
6912                                                 if not selected_nodes and \
6913                                                         (accept_root_node or ignore_priority is None):
6914                                                         # settle for a root node
6915                                                         selected_nodes = [nodes[0]]
6916
6917                         if not selected_nodes:
6918                                 nodes = get_nodes(ignore_priority=DepPriority.MEDIUM)
6919                                 if nodes:
6920                                         """Recursively gather a group of nodes that RDEPEND on
6921                                         eachother.  This ensures that they are merged as a group
6922                                         and get their RDEPENDs satisfied as soon as possible."""
6923                                         def gather_deps(ignore_priority,
6924                                                 mergeable_nodes, selected_nodes, node):
6925                                                 if node in selected_nodes:
6926                                                         return True
6927                                                 if node not in mergeable_nodes:
6928                                                         return False
6929                                                 if node == replacement_portage and \
6930                                                         mygraph.child_nodes(node,
6931                                                         ignore_priority=DepPriority.MEDIUM_SOFT):
6932                                                         # Make sure that portage always has all of it's
6933                                                         # RDEPENDs installed first.
6934                                                         return False
6935                                                 selected_nodes.add(node)
6936                                                 for child in mygraph.child_nodes(node,
6937                                                         ignore_priority=ignore_priority):
6938                                                         if not gather_deps(ignore_priority,
6939                                                                 mergeable_nodes, selected_nodes, child):
6940                                                                 return False
6941                                                 return True
6942                                         mergeable_nodes = set(nodes)
6943                                         if prefer_asap and asap_nodes:
6944                                                 nodes = asap_nodes
6945                                         for ignore_priority in xrange(DepPriority.SOFT,
6946                                                 DepPriority.MEDIUM_SOFT + 1):
6947                                                 for node in nodes:
6948                                                         if nodes is not asap_nodes and \
6949                                                                 not accept_root_node and \
6950                                                                 not mygraph.parent_nodes(node):
6951                                                                 continue
6952                                                         selected_nodes = set()
6953                                                         if gather_deps(ignore_priority,
6954                                                                 mergeable_nodes, selected_nodes, node):
6955                                                                 break
6956                                                         else:
6957                                                                 selected_nodes = None
6958                                                 if selected_nodes:
6959                                                         break
6960
6961                                         # If any nodes have been selected here, it's always
6962                                         # possible that anything up to a MEDIUM_SOFT priority
6963                                         # relationship has been ignored. This state is recorded
6964                                         # in ignore_priority so that relevant nodes will be
6965                                         # added to asap_nodes when appropriate.
6966                                         if selected_nodes:
6967                                                 ignore_priority = DepPriority.MEDIUM_SOFT
6968
6969                                         if prefer_asap and asap_nodes and not selected_nodes:
6970                                                 # We failed to find any asap nodes to merge, so ignore
6971                                                 # them for the next iteration.
6972                                                 prefer_asap = False
6973                                                 continue
6974
6975                                         if not selected_nodes and not accept_root_node:
6976                                                 # Maybe there are only root nodes left, so accept them
6977                                                 # for the next iteration.
6978                                                 accept_root_node = True
6979                                                 continue
6980
6981                         if selected_nodes and ignore_priority > DepPriority.SOFT:
6982                                 # Try to merge ignored medium deps as soon as possible.
6983                                 for node in selected_nodes:
6984                                         children = set(mygraph.child_nodes(node))
6985                                         soft = children.difference(
6986                                                 mygraph.child_nodes(node,
6987                                                 ignore_priority=DepPriority.SOFT))
6988                                         medium_soft = children.difference(
6989                                                 mygraph.child_nodes(node,
6990                                                 ignore_priority=DepPriority.MEDIUM_SOFT))
6991                                         medium_soft.difference_update(soft)
6992                                         for child in medium_soft:
6993                                                 if child in selected_nodes:
6994                                                         continue
6995                                                 if child in asap_nodes:
6996                                                         continue
6997                                                 asap_nodes.append(child)
6998
6999                         if selected_nodes and len(selected_nodes) > 1:
7000                                 if not isinstance(selected_nodes, list):
7001                                         selected_nodes = list(selected_nodes)
7002                                 selected_nodes.sort(cmp_circular_bias)
7003
7004                         if not selected_nodes and not myblocker_uninstalls.is_empty():
7005                                 # An Uninstall task needs to be executed in order to
7006                                 # avoid conflict if possible.
7007                                 min_parent_deps = None
7008                                 uninst_task = None
7009                                 for task in myblocker_uninstalls.leaf_nodes():
7010                                         # Do some sanity checks so that system or world packages
7011                                         # don't get uninstalled inappropriately here (only really
7012                                         # necessary when --complete-graph has not been enabled).
7013
7014                                         if task in ignored_uninstall_tasks:
7015                                                 continue
7016
7017                                         if task in scheduled_uninstalls:
7018                                                 # It's been scheduled but it hasn't
7019                                                 # been executed yet due to dependence
7020                                                 # on installation of blocking packages.
7021                                                 continue
7022
7023                                         root_config = self.roots[task.root]
7024                                         inst_pkg = self._pkg_cache[
7025                                                 ("installed", task.root, task.cpv, "nomerge")]
7026
7027                                         if self.digraph.contains(inst_pkg):
7028                                                 continue
7029
7030                                         forbid_overlap = False
7031                                         heuristic_overlap = False
7032                                         for blocker in myblocker_uninstalls.parent_nodes(task):
7033                                                 if blocker.eapi in ("0", "1"):
7034                                                         heuristic_overlap = True
7035                                                 elif blocker.atom.blocker.overlap.forbid:
7036                                                         forbid_overlap = True
7037                                                         break
7038                                         if forbid_overlap and running_root == task.root:
7039                                                 continue
7040
7041                                         if heuristic_overlap and running_root == task.root:
7042                                                 # Never uninstall sys-apps/portage or it's essential
7043                                                 # dependencies, except through replacement.
7044                                                 try:
7045                                                         runtime_dep_atoms = \
7046                                                                 list(runtime_deps.iterAtomsForPackage(task))
7047                                                 except portage.exception.InvalidDependString, e:
7048                                                         portage.writemsg("!!! Invalid PROVIDE in " + \
7049                                                                 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
7050                                                                 (task.root, task.cpv, e), noiselevel=-1)
7051                                                         del e
7052                                                         continue
7053
7054                                                 # Don't uninstall a runtime dep if it appears
7055                                                 # to be the only suitable one installed.
7056                                                 skip = False
7057                                                 vardb = root_config.trees["vartree"].dbapi
7058                                                 for atom in runtime_dep_atoms:
7059                                                         other_version = None
7060                                                         for pkg in vardb.match_pkgs(atom):
7061                                                                 if pkg.cpv == task.cpv and \
7062                                                                         pkg.metadata["COUNTER"] == \
7063                                                                         task.metadata["COUNTER"]:
7064                                                                         continue
7065                                                                 other_version = pkg
7066                                                                 break
7067                                                         if other_version is None:
7068                                                                 skip = True
7069                                                                 break
7070                                                 if skip:
7071                                                         continue
7072
7073                                                 # For packages in the system set, don't take
7074                                                 # any chances. If the conflict can't be resolved
7075                                                 # by a normal replacement operation then abort.
7076                                                 skip = False
7077                                                 try:
7078                                                         for atom in root_config.sets[
7079                                                                 "system"].iterAtomsForPackage(task):
7080                                                                 skip = True
7081                                                                 break
7082                                                 except portage.exception.InvalidDependString, e:
7083                                                         portage.writemsg("!!! Invalid PROVIDE in " + \
7084                                                                 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
7085                                                                 (task.root, task.cpv, e), noiselevel=-1)
7086                                                         del e
7087                                                         skip = True
7088                                                 if skip:
7089                                                         continue
7090
7091                                         # Note that the world check isn't always
7092                                         # necessary since self._complete_graph() will
7093                                         # add all packages from the system and world sets to the
7094                                         # graph. This just allows unresolved conflicts to be
7095                                         # detected as early as possible, which makes it possible
7096                                         # to avoid calling self._complete_graph() when it is
7097                                         # unnecessary due to blockers triggering an abortion.
7098                                         if not complete:
7099                                                 # For packages in the world set, go ahead an uninstall
7100                                                 # when necessary, as long as the atom will be satisfied
7101                                                 # in the final state.
7102                                                 graph_db = self.mydbapi[task.root]
7103                                                 skip = False
7104                                                 try:
7105                                                         for atom in root_config.sets[
7106                                                                 "world"].iterAtomsForPackage(task):
7107                                                                 satisfied = False
7108                                                                 for pkg in graph_db.match_pkgs(atom):
7109                                                                         if pkg == inst_pkg:
7110                                                                                 continue
7111                                                                         satisfied = True
7112                                                                         break
7113                                                                 if not satisfied:
7114                                                                         skip = True
7115                                                                         self._blocked_world_pkgs[inst_pkg] = atom
7116                                                                         break
7117                                                 except portage.exception.InvalidDependString, e:
7118                                                         portage.writemsg("!!! Invalid PROVIDE in " + \
7119                                                                 "'%svar/db/pkg/%s/PROVIDE': %s\n" % \
7120                                                                 (task.root, task.cpv, e), noiselevel=-1)
7121                                                         del e
7122                                                         skip = True
7123                                                 if skip:
7124                                                         continue
7125
7126                                         # Check the deps of parent nodes to ensure that
7127                                         # the chosen task produces a leaf node. Maybe
7128                                         # this can be optimized some more to make the
7129                                         # best possible choice, but the current algorithm
7130                                         # is simple and should be near optimal for most
7131                                         # common cases.
7132                                         parent_deps = set()
7133                                         for parent in mygraph.parent_nodes(task):
7134                                                 parent_deps.update(mygraph.child_nodes(parent,
7135                                                         ignore_priority=DepPriority.MEDIUM_SOFT))
7136                                         parent_deps.remove(task)
7137                                         if min_parent_deps is None or \
7138                                                 len(parent_deps) < min_parent_deps:
7139                                                 min_parent_deps = len(parent_deps)
7140                                                 uninst_task = task
7141
7142                                 if uninst_task is not None:
7143                                         # The uninstall is performed only after blocking
7144                                         # packages have been merged on top of it. File
7145                                         # collisions between blocking packages are detected
7146                                         # and removed from the list of files to be uninstalled.
7147                                         scheduled_uninstalls.add(uninst_task)
7148                                         parent_nodes = mygraph.parent_nodes(uninst_task)
7149
7150                                         # Reverse the parent -> uninstall edges since we want
7151                                         # to do the uninstall after blocking packages have
7152                                         # been merged on top of it.
7153                                         mygraph.remove(uninst_task)
7154                                         for blocked_pkg in parent_nodes:
7155                                                 mygraph.add(blocked_pkg, uninst_task,
7156                                                         priority=BlockerDepPriority.instance)
7157                                                 scheduler_graph.remove_edge(uninst_task, blocked_pkg)
7158                                                 scheduler_graph.add(blocked_pkg, uninst_task,
7159                                                         priority=BlockerDepPriority.instance)
7160
7161                                 else:
7162                                         # None of the Uninstall tasks are acceptable, so
7163                                         # the corresponding blockers are unresolvable.
7164                                         # We need to drop an Uninstall task here in order
7165                                         # to avoid the circular deps code path, but the
7166                                         # blocker will still be counted as an unresolved
7167                                         # conflict.
7168                                         for node in myblocker_uninstalls.leaf_nodes():
7169                                                 try:
7170                                                         mygraph.remove(node)
7171                                                 except KeyError:
7172                                                         pass
7173                                                 else:
7174                                                         uninst_task = node
7175                                                         ignored_uninstall_tasks.add(node)
7176                                                         break
7177
7178                                 if uninst_task is not None:
7179                                         # After dropping an Uninstall task, reset
7180                                         # the state variables for leaf node selection and
7181                                         # continue trying to select leaf nodes.
7182                                         prefer_asap = True
7183                                         accept_root_node = False
7184                                         continue
7185
7186                         if not selected_nodes:
7187                                 self._circular_deps_for_display = mygraph
7188                                 raise self._unknown_internal_error()
7189
7190                         # At this point, we've succeeded in selecting one or more nodes, so
7191                         # it's now safe to reset the prefer_asap and accept_root_node flags
7192                         # to their default states.
7193                         prefer_asap = True
7194                         accept_root_node = False
7195
7196                         mygraph.difference_update(selected_nodes)
7197
7198                         for node in selected_nodes:
7199                                 if isinstance(node, Package) and \
7200                                         node.operation == "nomerge":
7201                                         continue
7202
7203                                 # Handle interactions between blockers
7204                                 # and uninstallation tasks.
7205                                 solved_blockers = set()
7206                                 uninst_task = None
7207                                 if isinstance(node, Package) and \
7208                                         "uninstall" == node.operation:
7209                                         have_uninstall_task = True
7210                                         uninst_task = node
7211                                 else:
7212                                         vardb = self.trees[node.root]["vartree"].dbapi
7213                                         previous_cpv = vardb.match(node.slot_atom)
7214                                         if previous_cpv:
7215                                                 # The package will be replaced by this one, so remove
7216                                                 # the corresponding Uninstall task if necessary.
7217                                                 previous_cpv = previous_cpv[0]
7218                                                 uninst_task = \
7219                                                         ("installed", node.root, previous_cpv, "uninstall")
7220                                                 try:
7221                                                         mygraph.remove(uninst_task)
7222                                                 except KeyError:
7223                                                         pass
7224
7225                                 if uninst_task is not None and \
7226                                         uninst_task not in ignored_uninstall_tasks and \
7227                                         myblocker_uninstalls.contains(uninst_task):
7228                                         blocker_nodes = myblocker_uninstalls.parent_nodes(uninst_task)
7229                                         myblocker_uninstalls.remove(uninst_task)
7230                                         # Discard any blockers that this Uninstall solves.
7231                                         for blocker in blocker_nodes:
7232                                                 if not myblocker_uninstalls.child_nodes(blocker):
7233                                                         myblocker_uninstalls.remove(blocker)
7234                                                         solved_blockers.add(blocker)
7235
7236                                 retlist.append(node)
7237
7238                                 if (isinstance(node, Package) and \
7239                                         "uninstall" == node.operation) or \
7240                                         (uninst_task is not None and \
7241                                         uninst_task in scheduled_uninstalls):
7242                                         # Include satisfied blockers in the merge list
7243                                         # since the user might be interested and also
7244                                         # it serves as an indicator that blocking packages
7245                                         # will be temporarily installed simultaneously.
7246                                         for blocker in solved_blockers:
7247                                                 retlist.append(Blocker(atom=blocker.atom,
7248                                                         root=blocker.root, eapi=blocker.eapi,
7249                                                         satisfied=True))
7250
7251                 unsolvable_blockers = set(self._unsolvable_blockers.leaf_nodes())
7252                 for node in myblocker_uninstalls.root_nodes():
7253                         unsolvable_blockers.add(node)
7254
7255                 for blocker in unsolvable_blockers:
7256                         retlist.append(blocker)
7257
7258                 # If any Uninstall tasks need to be executed in order
7259                 # to avoid a conflict, complete the graph with any
7260                 # dependencies that may have been initially
7261                 # neglected (to ensure that unsafe Uninstall tasks
7262                 # are properly identified and blocked from execution).
7263                 if have_uninstall_task and \
7264                         not complete and \
7265                         not unsolvable_blockers:
7266                         self.myparams.add("complete")
7267                         raise self._serialize_tasks_retry("")
7268
7269                 if unsolvable_blockers and \
7270                         not self._accept_blocker_conflicts():
7271                         self._unsatisfied_blockers_for_display = unsolvable_blockers
7272                         self._serialized_tasks_cache = retlist[:]
7273                         self._scheduler_graph = scheduler_graph
7274                         raise self._unknown_internal_error()
7275
7276                 if self._slot_collision_info and \
7277                         not self._accept_blocker_conflicts():
7278                         self._serialized_tasks_cache = retlist[:]
7279                         self._scheduler_graph = scheduler_graph
7280                         raise self._unknown_internal_error()
7281
7282                 return retlist, scheduler_graph
7283
7284         def _show_circular_deps(self, mygraph):
7285                 # No leaf nodes are available, so we have a circular
7286                 # dependency panic situation.  Reduce the noise level to a
7287                 # minimum via repeated elimination of root nodes since they
7288                 # have no parents and thus can not be part of a cycle.
7289                 while True:
7290                         root_nodes = mygraph.root_nodes(
7291                                 ignore_priority=DepPriority.MEDIUM_SOFT)
7292                         if not root_nodes:
7293                                 break
7294                         mygraph.difference_update(root_nodes)
7295                 # Display the USE flags that are enabled on nodes that are part
7296                 # of dependency cycles in case that helps the user decide to
7297                 # disable some of them.
7298                 display_order = []
7299                 tempgraph = mygraph.copy()
7300                 while not tempgraph.empty():
7301                         nodes = tempgraph.leaf_nodes()
7302                         if not nodes:
7303                                 node = tempgraph.order[0]
7304                         else:
7305                                 node = nodes[0]
7306                         display_order.append(node)
7307                         tempgraph.remove(node)
7308                 display_order.reverse()
7309                 self.myopts.pop("--quiet", None)
7310                 self.myopts.pop("--verbose", None)
7311                 self.myopts["--tree"] = True
7312                 portage.writemsg("\n\n", noiselevel=-1)
7313                 self.display(display_order)
7314                 prefix = colorize("BAD", " * ")
7315                 portage.writemsg("\n", noiselevel=-1)
7316                 portage.writemsg(prefix + "Error: circular dependencies:\n",
7317                         noiselevel=-1)
7318                 portage.writemsg("\n", noiselevel=-1)
7319                 mygraph.debug_print()
7320                 portage.writemsg("\n", noiselevel=-1)
7321                 portage.writemsg(prefix + "Note that circular dependencies " + \
7322                         "can often be avoided by temporarily\n", noiselevel=-1)
7323                 portage.writemsg(prefix + "disabling USE flags that trigger " + \
7324                         "optional dependencies.\n", noiselevel=-1)
7325
7326         def _show_merge_list(self):
7327                 if self._serialized_tasks_cache is not None and \
7328                         not (self._displayed_list and \
7329                         (self._displayed_list == self._serialized_tasks_cache or \
7330                         self._displayed_list == \
7331                                 list(reversed(self._serialized_tasks_cache)))):
7332                         display_list = self._serialized_tasks_cache[:]
7333                         if "--tree" in self.myopts:
7334                                 display_list.reverse()
7335                         self.display(display_list)
7336
7337         def _show_unsatisfied_blockers(self, blockers):
7338                 self._show_merge_list()
7339                 msg = "Error: The above package list contains " + \
7340                         "packages which cannot be installed " + \
7341                         "at the same time on the same system."
7342                 prefix = colorize("BAD", " * ")
7343                 from textwrap import wrap
7344                 portage.writemsg("\n", noiselevel=-1)
7345                 for line in wrap(msg, 70):
7346                         portage.writemsg(prefix + line + "\n", noiselevel=-1)
7347
7348                 # Display the conflicting packages along with the packages
7349                 # that pulled them in. This is helpful for troubleshooting
7350                 # cases in which blockers don't solve automatically and
7351                 # the reasons are not apparent from the normal merge list
7352                 # display.
7353
7354                 conflict_pkgs = {}
7355                 for blocker in blockers:
7356                         for pkg in chain(self._blocked_pkgs.child_nodes(blocker), \
7357                                 self._blocker_parents.parent_nodes(blocker)):
7358                                 parent_atoms = self._parent_atoms.get(pkg)
7359                                 if not parent_atoms:
7360                                         atom = self._blocked_world_pkgs.get(pkg)
7361                                         if atom is not None:
7362                                                 parent_atoms = set([("@world", atom)])
7363                                 if parent_atoms:
7364                                         conflict_pkgs[pkg] = parent_atoms
7365
7366                 if conflict_pkgs:
7367                         # Reduce noise by pruning packages that are only
7368                         # pulled in by other conflict packages.
7369                         pruned_pkgs = set()
7370                         for pkg, parent_atoms in conflict_pkgs.iteritems():
7371                                 relevant_parent = False
7372                                 for parent, atom in parent_atoms:
7373                                         if parent not in conflict_pkgs:
7374                                                 relevant_parent = True
7375                                                 break
7376                                 if not relevant_parent:
7377                                         pruned_pkgs.add(pkg)
7378                         for pkg in pruned_pkgs:
7379                                 del conflict_pkgs[pkg]
7380
7381                 if conflict_pkgs:
7382                         msg = []
7383                         msg.append("\n")
7384                         indent = "  "
7385                         # Max number of parents shown, to avoid flooding the display.
7386                         max_parents = 3
7387                         for pkg, parent_atoms in conflict_pkgs.iteritems():
7388
7389                                 pruned_list = set()
7390
7391                                 # Prefer packages that are not directly involved in a conflict.
7392                                 for parent_atom in parent_atoms:
7393                                         if len(pruned_list) >= max_parents:
7394                                                 break
7395                                         parent, atom = parent_atom
7396                                         if parent not in conflict_pkgs:
7397                                                 pruned_list.add(parent_atom)
7398
7399                                 for parent_atom in parent_atoms:
7400                                         if len(pruned_list) >= max_parents:
7401                                                 break
7402                                         pruned_list.add(parent_atom)
7403
7404                                 omitted_parents = len(parent_atoms) - len(pruned_list)
7405                                 msg.append(indent + "%s pulled in by\n" % pkg)
7406
7407                                 for parent_atom in pruned_list:
7408                                         parent, atom = parent_atom
7409                                         msg.append(2*indent)
7410                                         if isinstance(parent,
7411                                                 (PackageArg, AtomArg)):
7412                                                 # For PackageArg and AtomArg types, it's
7413                                                 # redundant to display the atom attribute.
7414                                                 msg.append(str(parent))
7415                                         else:
7416                                                 # Display the specific atom from SetArg or
7417                                                 # Package types.
7418                                                 msg.append("%s required by %s" % (atom, parent))
7419                                         msg.append("\n")
7420
7421                                 if omitted_parents:
7422                                         msg.append(2*indent)
7423                                         msg.append("(and %d more)\n" % omitted_parents)
7424
7425                                 msg.append("\n")
7426
7427                         sys.stderr.write("".join(msg))
7428                         sys.stderr.flush()
7429
7430                 if "--quiet" not in self.myopts:
7431                         show_blocker_docs_link()
7432
7433         def display(self, mylist, favorites=[], verbosity=None):
7434
7435                 # This is used to prevent display_problems() from
7436                 # redundantly displaying this exact same merge list
7437                 # again via _show_merge_list().
7438                 self._displayed_list = mylist
7439
7440                 if verbosity is None:
7441                         verbosity = ("--quiet" in self.myopts and 1 or \
7442                                 "--verbose" in self.myopts and 3 or 2)
7443                 favorites_set = InternalPackageSet(favorites)
7444                 oneshot = "--oneshot" in self.myopts or \
7445                         "--onlydeps" in self.myopts
7446                 columns = "--columns" in self.myopts
7447                 changelogs=[]
7448                 p=[]
7449                 blockers = []
7450
7451                 counters = PackageCounters()
7452
7453                 if verbosity == 1 and "--verbose" not in self.myopts:
7454                         def create_use_string(*args):
7455                                 return ""
7456                 else:
7457                         def create_use_string(name, cur_iuse, iuse_forced, cur_use,
7458                                 old_iuse, old_use,
7459                                 is_new, reinst_flags,
7460                                 all_flags=(verbosity == 3 or "--quiet" in self.myopts),
7461                                 alphabetical=("--alphabetical" in self.myopts)):
7462                                 enabled = []
7463                                 if alphabetical:
7464                                         disabled = enabled
7465                                         removed = enabled
7466                                 else:
7467                                         disabled = []
7468                                         removed = []
7469                                 cur_iuse = set(cur_iuse)
7470                                 enabled_flags = cur_iuse.intersection(cur_use)
7471                                 removed_iuse = set(old_iuse).difference(cur_iuse)
7472                                 any_iuse = cur_iuse.union(old_iuse)
7473                                 any_iuse = list(any_iuse)
7474                                 any_iuse.sort()
7475                                 for flag in any_iuse:
7476                                         flag_str = None
7477                                         isEnabled = False
7478                                         reinst_flag = reinst_flags and flag in reinst_flags
7479                                         if flag in enabled_flags:
7480                                                 isEnabled = True
7481                                                 if is_new or flag in old_use and \
7482                                                         (all_flags or reinst_flag):
7483                                                         flag_str = red(flag)
7484                                                 elif flag not in old_iuse:
7485                                                         flag_str = yellow(flag) + "%*"
7486                                                 elif flag not in old_use:
7487                                                         flag_str = green(flag) + "*"
7488                                         elif flag in removed_iuse:
7489                                                 if all_flags or reinst_flag:
7490                                                         flag_str = yellow("-" + flag) + "%"
7491                                                         if flag in old_use:
7492                                                                 flag_str += "*"
7493                                                         flag_str = "(" + flag_str + ")"
7494                                                         removed.append(flag_str)
7495                                                 continue
7496                                         else:
7497                                                 if is_new or flag in old_iuse and \
7498                                                         flag not in old_use and \
7499                                                         (all_flags or reinst_flag):
7500                                                         flag_str = blue("-" + flag)
7501                                                 elif flag not in old_iuse:
7502                                                         flag_str = yellow("-" + flag)
7503                                                         if flag not in iuse_forced:
7504                                                                 flag_str += "%"
7505                                                 elif flag in old_use:
7506                                                         flag_str = green("-" + flag) + "*"
7507                                         if flag_str:
7508                                                 if flag in iuse_forced:
7509                                                         flag_str = "(" + flag_str + ")"
7510                                                 if isEnabled:
7511                                                         enabled.append(flag_str)
7512                                                 else:
7513                                                         disabled.append(flag_str)
7514
7515                                 if alphabetical:
7516                                         ret = " ".join(enabled)
7517                                 else:
7518                                         ret = " ".join(enabled + disabled + removed)
7519                                 if ret:
7520                                         ret = '%s="%s" ' % (name, ret)
7521                                 return ret
7522
7523                 repo_display = RepoDisplay(self.roots)
7524
7525                 tree_nodes = []
7526                 display_list = []
7527                 mygraph = self.digraph.copy()
7528
7529                 # If there are any Uninstall instances, add the corresponding
7530                 # blockers to the digraph (useful for --tree display).
7531
7532                 executed_uninstalls = set(node for node in mylist \
7533                         if isinstance(node, Package) and node.operation == "unmerge")
7534
7535                 for uninstall in self._blocker_uninstalls.leaf_nodes():
7536                         uninstall_parents = \
7537                                 self._blocker_uninstalls.parent_nodes(uninstall)
7538                         if not uninstall_parents:
7539                                 continue
7540
7541                         # Remove the corresponding "nomerge" node and substitute
7542                         # the Uninstall node.
7543                         inst_pkg = self._pkg_cache[
7544                                 ("installed", uninstall.root, uninstall.cpv, "nomerge")]
7545                         try:
7546                                 mygraph.remove(inst_pkg)
7547                         except KeyError:
7548                                 pass
7549
7550                         try:
7551                                 inst_pkg_blockers = self._blocker_parents.child_nodes(inst_pkg)
7552                         except KeyError:
7553                                 inst_pkg_blockers = []
7554
7555                         # Break the Package -> Uninstall edges.
7556                         mygraph.remove(uninstall)
7557
7558                         # Resolution of a package's blockers
7559                         # depend on it's own uninstallation.
7560                         for blocker in inst_pkg_blockers:
7561                                 mygraph.add(uninstall, blocker)
7562
7563                         # Expand Package -> Uninstall edges into
7564                         # Package -> Blocker -> Uninstall edges.
7565                         for blocker in uninstall_parents:
7566                                 mygraph.add(uninstall, blocker)
7567                                 for parent in self._blocker_parents.parent_nodes(blocker):
7568                                         if parent != inst_pkg:
7569                                                 mygraph.add(blocker, parent)
7570
7571                         # If the uninstall task did not need to be executed because
7572                         # of an upgrade, display Blocker -> Upgrade edges since the
7573                         # corresponding Blocker -> Uninstall edges will not be shown.
7574                         upgrade_node = \
7575                                 self._slot_pkg_map[uninstall.root].get(uninstall.slot_atom)
7576                         if upgrade_node is not None and \
7577                                 uninstall not in executed_uninstalls:
7578                                 for blocker in uninstall_parents:
7579                                         mygraph.add(upgrade_node, blocker)
7580
7581                 unsatisfied_blockers = []
7582                 i = 0
7583                 depth = 0
7584                 shown_edges = set()
7585                 for x in mylist:
7586                         if isinstance(x, Blocker) and not x.satisfied:
7587                                 unsatisfied_blockers.append(x)
7588                                 continue
7589                         graph_key = x
7590                         if "--tree" in self.myopts:
7591                                 depth = len(tree_nodes)
7592                                 while depth and graph_key not in \
7593                                         mygraph.child_nodes(tree_nodes[depth-1]):
7594                                                 depth -= 1
7595                                 if depth:
7596                                         tree_nodes = tree_nodes[:depth]
7597                                         tree_nodes.append(graph_key)
7598                                         display_list.append((x, depth, True))
7599                                         shown_edges.add((graph_key, tree_nodes[depth-1]))
7600                                 else:
7601                                         traversed_nodes = set() # prevent endless circles
7602                                         traversed_nodes.add(graph_key)
7603                                         def add_parents(current_node, ordered):
7604                                                 parent_nodes = None
7605                                                 # Do not traverse to parents if this node is an
7606                                                 # an argument or a direct member of a set that has
7607                                                 # been specified as an argument (system or world).
7608                                                 if current_node not in self._set_nodes:
7609                                                         parent_nodes = mygraph.parent_nodes(current_node)
7610                                                 if parent_nodes:
7611                                                         child_nodes = set(mygraph.child_nodes(current_node))
7612                                                         selected_parent = None
7613                                                         # First, try to avoid a direct cycle.
7614                                                         for node in parent_nodes:
7615                                                                 if not isinstance(node, (Blocker, Package)):
7616                                                                         continue
7617                                                                 if node not in traversed_nodes and \
7618                                                                         node not in child_nodes:
7619                                                                         edge = (current_node, node)
7620                                                                         if edge in shown_edges:
7621                                                                                 continue
7622                                                                         selected_parent = node
7623                                                                         break
7624                                                         if not selected_parent:
7625                                                                 # A direct cycle is unavoidable.
7626                                                                 for node in parent_nodes:
7627                                                                         if not isinstance(node, (Blocker, Package)):
7628                                                                                 continue
7629                                                                         if node not in traversed_nodes:
7630                                                                                 edge = (current_node, node)
7631                                                                                 if edge in shown_edges:
7632                                                                                         continue
7633                                                                                 selected_parent = node
7634                                                                                 break
7635                                                         if selected_parent:
7636                                                                 shown_edges.add((current_node, selected_parent))
7637                                                                 traversed_nodes.add(selected_parent)
7638                                                                 add_parents(selected_parent, False)
7639                                                 display_list.append((current_node,
7640                                                         len(tree_nodes), ordered))
7641                                                 tree_nodes.append(current_node)
7642                                         tree_nodes = []
7643                                         add_parents(graph_key, True)
7644                         else:
7645                                 display_list.append((x, depth, True))
7646                 mylist = display_list
7647                 for x in unsatisfied_blockers:
7648                         mylist.append((x, 0, True))
7649
7650                 last_merge_depth = 0
7651                 for i in xrange(len(mylist)-1,-1,-1):
7652                         graph_key, depth, ordered = mylist[i]
7653                         if not ordered and depth == 0 and i > 0 \
7654                                 and graph_key == mylist[i-1][0] and \
7655                                 mylist[i-1][1] == 0:
7656                                 # An ordered node got a consecutive duplicate when the tree was
7657                                 # being filled in.
7658                                 del mylist[i]
7659                                 continue
7660                         if ordered and graph_key[-1] != "nomerge":
7661                                 last_merge_depth = depth
7662                                 continue
7663                         if depth >= last_merge_depth or \
7664                                 i < len(mylist) - 1 and \
7665                                 depth >= mylist[i+1][1]:
7666                                         del mylist[i]
7667
7668                 from portage import flatten
7669                 from portage.dep import use_reduce, paren_reduce
7670                 # files to fetch list - avoids counting a same file twice
7671                 # in size display (verbose mode)
7672                 myfetchlist=[]
7673
7674                 # Use this set to detect when all the "repoadd" strings are "[0]"
7675                 # and disable the entire repo display in this case.
7676                 repoadd_set = set()
7677
7678                 for mylist_index in xrange(len(mylist)):
7679                         x, depth, ordered = mylist[mylist_index]
7680                         pkg_type = x[0]
7681                         myroot = x[1]
7682                         pkg_key = x[2]
7683                         portdb = self.trees[myroot]["porttree"].dbapi
7684                         bindb  = self.trees[myroot]["bintree"].dbapi
7685                         vardb = self.trees[myroot]["vartree"].dbapi
7686                         vartree = self.trees[myroot]["vartree"]
7687                         pkgsettings = self.pkgsettings[myroot]
7688
7689                         fetch=" "
7690                         indent = " " * depth
7691
7692                         if isinstance(x, Blocker):
7693                                 if x.satisfied:
7694                                         blocker_style = "PKG_BLOCKER_SATISFIED"
7695                                         addl = "%s  %s  " % (colorize(blocker_style, "b"), fetch)
7696                                 else:
7697                                         blocker_style = "PKG_BLOCKER"
7698                                         addl = "%s  %s  " % (colorize(blocker_style, "B"), fetch)
7699                                 if ordered:
7700                                         counters.blocks += 1
7701                                         if x.satisfied:
7702                                                 counters.blocks_satisfied += 1
7703                                 resolved = portage.key_expand(
7704                                         str(x.atom).lstrip("!"), mydb=vardb, settings=pkgsettings)
7705                                 if "--columns" in self.myopts and "--quiet" in self.myopts:
7706                                         addl += " " + colorize(blocker_style, resolved)
7707                                 else:
7708                                         addl = "[%s %s] %s%s" % \
7709                                                 (colorize(blocker_style, "blocks"),
7710                                                 addl, indent, colorize(blocker_style, resolved))
7711                                 block_parents = self._blocker_parents.parent_nodes(x)
7712                                 block_parents = set([pnode[2] for pnode in block_parents])
7713                                 block_parents = ", ".join(block_parents)
7714                                 if resolved!=x[2]:
7715                                         addl += colorize(blocker_style,
7716                                                 " (\"%s\" is blocking %s)") % \
7717                                                 (str(x.atom).lstrip("!"), block_parents)
7718                                 else:
7719                                         addl += colorize(blocker_style,
7720                                                 " (is blocking %s)") % block_parents
7721                                 if isinstance(x, Blocker) and x.satisfied:
7722                                         if columns:
7723                                                 continue
7724                                         p.append(addl)
7725                                 else:
7726                                         blockers.append(addl)
7727                         else:
7728                                 pkg_status = x[3]
7729                                 pkg_merge = ordered and pkg_status == "merge"
7730                                 if not pkg_merge and pkg_status == "merge":
7731                                         pkg_status = "nomerge"
7732                                 built = pkg_type != "ebuild"
7733                                 installed = pkg_type == "installed"
7734                                 pkg = x
7735                                 metadata = pkg.metadata
7736                                 ebuild_path = None
7737                                 repo_name = metadata["repository"]
7738                                 if pkg_type == "ebuild":
7739                                         ebuild_path = portdb.findname(pkg_key)
7740                                         if not ebuild_path: # shouldn't happen
7741                                                 raise portage.exception.PackageNotFound(pkg_key)
7742                                         repo_path_real = os.path.dirname(os.path.dirname(
7743                                                 os.path.dirname(ebuild_path)))
7744                                 else:
7745                                         repo_path_real = portdb.getRepositoryPath(repo_name)
7746                                 pkg_use = list(pkg.use.enabled)
7747                                 try:
7748                                         restrict = flatten(use_reduce(paren_reduce(
7749                                                 pkg.metadata["RESTRICT"]), uselist=pkg_use))
7750                                 except portage.exception.InvalidDependString, e:
7751                                         if not pkg.installed:
7752                                                 show_invalid_depstring_notice(x,
7753                                                         pkg.metadata["RESTRICT"], str(e))
7754                                                 del e
7755                                                 return 1
7756                                         restrict = []
7757                                 if "ebuild" == pkg_type and x[3] != "nomerge" and \
7758                                         "fetch" in restrict:
7759                                         fetch = red("F")
7760                                         if ordered:
7761                                                 counters.restrict_fetch += 1
7762                                         if portdb.fetch_check(pkg_key, pkg_use):
7763                                                 fetch = green("f")
7764                                                 if ordered:
7765                                                         counters.restrict_fetch_satisfied += 1
7766
7767                                 #we need to use "--emptrytree" testing here rather than "empty" param testing because "empty"
7768                                 #param is used for -u, where you still *do* want to see when something is being upgraded.
7769                                 myoldbest = []
7770                                 myinslotlist = None
7771                                 installed_versions = vardb.match(portage.cpv_getkey(pkg_key))
7772                                 if vardb.cpv_exists(pkg_key):
7773                                         addl="  "+yellow("R")+fetch+"  "
7774                                         if ordered:
7775                                                 if pkg_merge:
7776                                                         counters.reinst += 1
7777                                                 elif pkg_status == "uninstall":
7778                                                         counters.uninst += 1
7779                                 # filter out old-style virtual matches
7780                                 elif installed_versions and \
7781                                         portage.cpv_getkey(installed_versions[0]) == \
7782                                         portage.cpv_getkey(pkg_key):
7783                                         myinslotlist = vardb.match(pkg.slot_atom)
7784                                         # If this is the first install of a new-style virtual, we
7785                                         # need to filter out old-style virtual matches.
7786                                         if myinslotlist and \
7787                                                 portage.cpv_getkey(myinslotlist[0]) != \
7788                                                 portage.cpv_getkey(pkg_key):
7789                                                 myinslotlist = None
7790                                         if myinslotlist:
7791                                                 myoldbest = myinslotlist[:]
7792                                                 addl = "   " + fetch
7793                                                 if not portage.dep.cpvequal(pkg_key,
7794                                                         portage.best([pkg_key] + myoldbest)):
7795                                                         # Downgrade in slot
7796                                                         addl += turquoise("U")+blue("D")
7797                                                         if ordered:
7798                                                                 counters.downgrades += 1
7799                                                 else:
7800                                                         # Update in slot
7801                                                         addl += turquoise("U") + " "
7802                                                         if ordered:
7803                                                                 counters.upgrades += 1
7804                                         else:
7805                                                 # New slot, mark it new.
7806                                                 addl = " " + green("NS") + fetch + "  "
7807                                                 myoldbest = vardb.match(portage.cpv_getkey(pkg_key))
7808                                                 if ordered:
7809                                                         counters.newslot += 1
7810
7811                                         if "--changelog" in self.myopts:
7812                                                 inst_matches = vardb.match(pkg.slot_atom)
7813                                                 if inst_matches:
7814                                                         changelogs.extend(self.calc_changelog(
7815                                                                 portdb.findname(pkg_key),
7816                                                                 inst_matches[0], pkg_key))
7817                                 else:
7818                                         addl = " " + green("N") + " " + fetch + "  "
7819                                         if ordered:
7820                                                 counters.new += 1
7821
7822                                 verboseadd = ""
7823                                 repoadd = None
7824
7825                                 if True:
7826                                         # USE flag display
7827                                         forced_flags = set()
7828                                         pkgsettings.setcpv(pkg) # for package.use.{mask,force}
7829                                         forced_flags.update(pkgsettings.useforce)
7830                                         forced_flags.update(pkgsettings.usemask)
7831
7832                                         cur_use = [flag for flag in pkg.use.enabled \
7833                                                 if flag in pkg.iuse.all]
7834                                         cur_iuse = sorted(pkg.iuse.all)
7835
7836                                         if myoldbest and myinslotlist:
7837                                                 previous_cpv = myoldbest[0]
7838                                         else:
7839                                                 previous_cpv = pkg.cpv
7840                                         if vardb.cpv_exists(previous_cpv):
7841                                                 old_iuse, old_use = vardb.aux_get(
7842                                                                 previous_cpv, ["IUSE", "USE"])
7843                                                 old_iuse = list(set(
7844                                                         filter_iuse_defaults(old_iuse.split())))
7845                                                 old_iuse.sort()
7846                                                 old_use = old_use.split()
7847                                                 is_new = False
7848                                         else:
7849                                                 old_iuse = []
7850                                                 old_use = []
7851                                                 is_new = True
7852
7853                                         old_use = [flag for flag in old_use if flag in old_iuse]
7854
7855                                         use_expand = pkgsettings["USE_EXPAND"].lower().split()
7856                                         use_expand.sort()
7857                                         use_expand.reverse()
7858                                         use_expand_hidden = \
7859                                                 pkgsettings["USE_EXPAND_HIDDEN"].lower().split()
7860
7861                                         def map_to_use_expand(myvals, forcedFlags=False,
7862                                                 removeHidden=True):
7863                                                 ret = {}
7864                                                 forced = {}
7865                                                 for exp in use_expand:
7866                                                         ret[exp] = []
7867                                                         forced[exp] = set()
7868                                                         for val in myvals[:]:
7869                                                                 if val.startswith(exp.lower()+"_"):
7870                                                                         if val in forced_flags:
7871                                                                                 forced[exp].add(val[len(exp)+1:])
7872                                                                         ret[exp].append(val[len(exp)+1:])
7873                                                                         myvals.remove(val)
7874                                                 ret["USE"] = myvals
7875                                                 forced["USE"] = [val for val in myvals \
7876                                                         if val in forced_flags]
7877                                                 if removeHidden:
7878                                                         for exp in use_expand_hidden:
7879                                                                 ret.pop(exp, None)
7880                                                 if forcedFlags:
7881                                                         return ret, forced
7882                                                 return ret
7883
7884                                         # Prevent USE_EXPAND_HIDDEN flags from being hidden if they
7885                                         # are the only thing that triggered reinstallation.
7886                                         reinst_flags_map = {}
7887                                         reinstall_for_flags = self._reinstall_nodes.get(pkg)
7888                                         reinst_expand_map = None
7889                                         if reinstall_for_flags:
7890                                                 reinst_flags_map = map_to_use_expand(
7891                                                         list(reinstall_for_flags), removeHidden=False)
7892                                                 for k in list(reinst_flags_map):
7893                                                         if not reinst_flags_map[k]:
7894                                                                 del reinst_flags_map[k]
7895                                                 if not reinst_flags_map.get("USE"):
7896                                                         reinst_expand_map = reinst_flags_map.copy()
7897                                                         reinst_expand_map.pop("USE", None)
7898                                         if reinst_expand_map and \
7899                                                 not set(reinst_expand_map).difference(
7900                                                 use_expand_hidden):
7901                                                 use_expand_hidden = \
7902                                                         set(use_expand_hidden).difference(
7903                                                         reinst_expand_map)
7904
7905                                         cur_iuse_map, iuse_forced = \
7906                                                 map_to_use_expand(cur_iuse, forcedFlags=True)
7907                                         cur_use_map = map_to_use_expand(cur_use)
7908                                         old_iuse_map = map_to_use_expand(old_iuse)
7909                                         old_use_map = map_to_use_expand(old_use)
7910
7911                                         use_expand.sort()
7912                                         use_expand.insert(0, "USE")
7913                                         
7914                                         for key in use_expand:
7915                                                 if key in use_expand_hidden:
7916                                                         continue
7917                                                 verboseadd += create_use_string(key.upper(),
7918                                                         cur_iuse_map[key], iuse_forced[key],
7919                                                         cur_use_map[key], old_iuse_map[key],
7920                                                         old_use_map[key], is_new,
7921                                                         reinst_flags_map.get(key))
7922
7923                                 if verbosity == 3:
7924                                         # size verbose
7925                                         mysize=0
7926                                         if pkg_type == "ebuild" and pkg_merge:
7927                                                 try:
7928                                                         myfilesdict = portdb.getfetchsizes(pkg_key,
7929                                                                 useflags=pkg_use, debug=self.edebug)
7930                                                 except portage.exception.InvalidDependString, e:
7931                                                         src_uri = portdb.aux_get(pkg_key, ["SRC_URI"])[0]
7932                                                         show_invalid_depstring_notice(x, src_uri, str(e))
7933                                                         del e
7934                                                         return 1
7935                                                 if myfilesdict is None:
7936                                                         myfilesdict="[empty/missing/bad digest]"
7937                                                 else:
7938                                                         for myfetchfile in myfilesdict:
7939                                                                 if myfetchfile not in myfetchlist:
7940                                                                         mysize+=myfilesdict[myfetchfile]
7941                                                                         myfetchlist.append(myfetchfile)
7942                                                         if ordered:
7943                                                                 counters.totalsize += mysize
7944                                                 verboseadd += format_size(mysize)
7945
7946                                         # overlay verbose
7947                                         # assign index for a previous version in the same slot
7948                                         has_previous = False
7949                                         repo_name_prev = None
7950                                         slot_atom = "%s:%s" % (portage.dep_getkey(pkg_key),
7951                                                 metadata["SLOT"])
7952                                         slot_matches = vardb.match(slot_atom)
7953                                         if slot_matches:
7954                                                 has_previous = True
7955                                                 repo_name_prev = vardb.aux_get(slot_matches[0],
7956                                                         ["repository"])[0]
7957
7958                                         # now use the data to generate output
7959                                         if pkg.installed or not has_previous:
7960                                                 repoadd = repo_display.repoStr(repo_path_real)
7961                                         else:
7962                                                 repo_path_prev = None
7963                                                 if repo_name_prev:
7964                                                         repo_path_prev = portdb.getRepositoryPath(
7965                                                                 repo_name_prev)
7966                                                 if repo_path_prev == repo_path_real:
7967                                                         repoadd = repo_display.repoStr(repo_path_real)
7968                                                 else:
7969                                                         repoadd = "%s=>%s" % (
7970                                                                 repo_display.repoStr(repo_path_prev),
7971                                                                 repo_display.repoStr(repo_path_real))
7972                                         if repoadd:
7973                                                 repoadd_set.add(repoadd)
7974
7975                                 xs = [portage.cpv_getkey(pkg_key)] + \
7976                                         list(portage.catpkgsplit(pkg_key)[2:])
7977                                 if xs[2] == "r0":
7978                                         xs[2] = ""
7979                                 else:
7980                                         xs[2] = "-" + xs[2]
7981
7982                                 mywidth = 130
7983                                 if "COLUMNWIDTH" in self.settings:
7984                                         try:
7985                                                 mywidth = int(self.settings["COLUMNWIDTH"])
7986                                         except ValueError, e:
7987                                                 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
7988                                                 portage.writemsg(
7989                                                         "!!! Unable to parse COLUMNWIDTH='%s'\n" % \
7990                                                         self.settings["COLUMNWIDTH"], noiselevel=-1)
7991                                                 del e
7992                                 oldlp = mywidth - 30
7993                                 newlp = oldlp - 30
7994
7995                                 # Convert myoldbest from a list to a string.
7996                                 if not myoldbest:
7997                                         myoldbest = ""
7998                                 else:
7999                                         for pos, key in enumerate(myoldbest):
8000                                                 key = portage.catpkgsplit(key)[2] + \
8001                                                         "-" + portage.catpkgsplit(key)[3]
8002                                                 if key[-3:] == "-r0":
8003                                                         key = key[:-3]
8004                                                 myoldbest[pos] = key
8005                                         myoldbest = blue("["+", ".join(myoldbest)+"]")
8006
8007                                 pkg_cp = xs[0]
8008                                 root_config = self.roots[myroot]
8009                                 system_set = root_config.sets["system"]
8010                                 world_set  = root_config.sets["world"]
8011
8012                                 pkg_system = False
8013                                 pkg_world = False
8014                                 try:
8015                                         pkg_system = system_set.findAtomForPackage(pkg)
8016                                         pkg_world  = world_set.findAtomForPackage(pkg)
8017                                         if not (oneshot or pkg_world) and \
8018                                                 myroot == self.target_root and \
8019                                                 favorites_set.findAtomForPackage(pkg):
8020                                                 # Maybe it will be added to world now.
8021                                                 if create_world_atom(pkg, favorites_set, root_config):
8022                                                         pkg_world = True
8023                                 except portage.exception.InvalidDependString:
8024                                         # This is reported elsewhere if relevant.
8025                                         pass
8026
8027                                 def pkgprint(pkg_str):
8028                                         if pkg_merge:
8029                                                 if pkg_system:
8030                                                         return colorize("PKG_MERGE_SYSTEM", pkg_str)
8031                                                 elif pkg_world:
8032                                                         return colorize("PKG_MERGE_WORLD", pkg_str)
8033                                                 else:
8034                                                         return colorize("PKG_MERGE", pkg_str)
8035                                         elif pkg_status == "uninstall":
8036                                                 return colorize("PKG_UNINSTALL", pkg_str)
8037                                         else:
8038                                                 if pkg_system:
8039                                                         return colorize("PKG_NOMERGE_SYSTEM", pkg_str)
8040                                                 elif pkg_world:
8041                                                         return colorize("PKG_NOMERGE_WORLD", pkg_str)
8042                                                 else:
8043                                                         return colorize("PKG_NOMERGE", pkg_str)
8044
8045                                 try:
8046                                         properties = flatten(use_reduce(paren_reduce(
8047                                                 pkg.metadata["PROPERTIES"]), uselist=pkg.use.enabled))
8048                                 except portage.exception.InvalidDependString, e:
8049                                         if not pkg.installed:
8050                                                 show_invalid_depstring_notice(pkg,
8051                                                         pkg.metadata["PROPERTIES"], str(e))
8052                                                 del e
8053                                                 return 1
8054                                         properties = []
8055                                 interactive = "interactive" in properties
8056                                 if interactive and pkg.operation == "merge":
8057                                         addl = colorize("WARN", "I") + addl[1:]
8058                                         if ordered:
8059                                                 counters.interactive += 1
8060
8061                                 if x[1]!="/":
8062                                         if myoldbest:
8063                                                 myoldbest +=" "
8064                                         if "--columns" in self.myopts:
8065                                                 if "--quiet" in self.myopts:
8066                                                         myprint=addl+" "+indent+pkgprint(pkg_cp)
8067                                                         myprint=myprint+darkblue(" "+xs[1]+xs[2])+" "
8068                                                         myprint=myprint+myoldbest
8069                                                         myprint=myprint+darkgreen("to "+x[1])
8070                                                         verboseadd = None
8071                                                 else:
8072                                                         if not pkg_merge:
8073                                                                 myprint = "[%s] %s%s" % \
8074                                                                         (pkgprint(pkg_status.ljust(13)),
8075                                                                         indent, pkgprint(pkg.cp))
8076                                                         else:
8077                                                                 myprint = "[%s %s] %s%s" % \
8078                                                                         (pkgprint(pkg.type_name), addl,
8079                                                                         indent, pkgprint(pkg.cp))
8080                                                         if (newlp-nc_len(myprint)) > 0:
8081                                                                 myprint=myprint+(" "*(newlp-nc_len(myprint)))
8082                                                         myprint=myprint+"["+darkblue(xs[1]+xs[2])+"] "
8083                                                         if (oldlp-nc_len(myprint)) > 0:
8084                                                                 myprint=myprint+" "*(oldlp-nc_len(myprint))
8085                                                         myprint=myprint+myoldbest
8086                                                         myprint += darkgreen("to " + pkg.root)
8087                                         else:
8088                                                 if not pkg_merge:
8089                                                         myprint = "[%s] " % pkgprint(pkg_status.ljust(13))
8090                                                 else:
8091                                                         myprint = "[" + pkg_type + " " + addl + "] "
8092                                                 myprint += indent + pkgprint(pkg_key) + " " + \
8093                                                         myoldbest + darkgreen("to " + myroot)
8094                                 else:
8095                                         if "--columns" in self.myopts:
8096                                                 if "--quiet" in self.myopts:
8097                                                         myprint=addl+" "+indent+pkgprint(pkg_cp)
8098                                                         myprint=myprint+" "+green(xs[1]+xs[2])+" "
8099                                                         myprint=myprint+myoldbest
8100                                                         verboseadd = None
8101                                                 else:
8102                                                         if not pkg_merge:
8103                                                                 myprint = "[%s] %s%s" % \
8104                                                                         (pkgprint(pkg_status.ljust(13)),
8105                                                                         indent, pkgprint(pkg.cp))
8106                                                         else:
8107                                                                 myprint = "[%s %s] %s%s" % \
8108                                                                         (pkgprint(pkg.type_name), addl,
8109                                                                         indent, pkgprint(pkg.cp))
8110                                                         if (newlp-nc_len(myprint)) > 0:
8111                                                                 myprint=myprint+(" "*(newlp-nc_len(myprint)))
8112                                                         myprint=myprint+green(" ["+xs[1]+xs[2]+"] ")
8113                                                         if (oldlp-nc_len(myprint)) > 0:
8114                                                                 myprint=myprint+(" "*(oldlp-nc_len(myprint)))
8115                                                         myprint += myoldbest
8116                                         else:
8117                                                 if not pkg_merge:
8118                                                         myprint = "[%s] %s%s %s" % \
8119                                                                 (pkgprint(pkg_status.ljust(13)),
8120                                                                 indent, pkgprint(pkg.cpv),
8121                                                                 myoldbest)
8122                                                 else:
8123                                                         myprint = "[%s %s] %s%s %s" % \
8124                                                                 (pkgprint(pkg_type), addl, indent,
8125                                                                 pkgprint(pkg.cpv), myoldbest)
8126
8127                                 if columns and pkg.operation == "uninstall":
8128                                         continue
8129                                 p.append((myprint, verboseadd, repoadd))
8130
8131                                 if "--tree" not in self.myopts and \
8132                                         "--quiet" not in self.myopts and \
8133                                         not self._opts_no_restart.intersection(self.myopts) and \
8134                                         pkg.root == self._running_root.root and \
8135                                         portage.match_from_list(
8136                                         portage.const.PORTAGE_PACKAGE_ATOM, [pkg]) and \
8137                                         not vardb.cpv_exists(pkg.cpv) and \
8138                                         "--quiet" not in self.myopts:
8139                                                 if mylist_index < len(mylist) - 1:
8140                                                         p.append(colorize("WARN", "*** Portage will stop merging at this point and reload itself,"))
8141                                                         p.append(colorize("WARN", "    then resume the merge."))
8142
8143                 out = sys.stdout
8144                 show_repos = repoadd_set and repoadd_set != set(["0"])
8145
8146                 for x in p:
8147                         if isinstance(x, basestring):
8148                                 out.write("%s\n" % (x,))
8149                                 continue
8150
8151                         myprint, verboseadd, repoadd = x
8152
8153                         if verboseadd:
8154                                 myprint += " " + verboseadd
8155
8156                         if show_repos and repoadd:
8157                                 myprint += " " + teal("[%s]" % repoadd)
8158
8159                         out.write("%s\n" % (myprint,))
8160
8161                 for x in blockers:
8162                         print x
8163
8164                 if verbosity == 3:
8165                         print
8166                         print counters
8167                         if show_repos:
8168                                 sys.stdout.write(str(repo_display))
8169
8170                 if "--changelog" in self.myopts:
8171                         print
8172                         for revision,text in changelogs:
8173                                 print bold('*'+revision)
8174                                 sys.stdout.write(text)
8175
8176                 sys.stdout.flush()
8177                 return os.EX_OK
8178
8179         def display_problems(self):
8180                 """
8181                 Display problems with the dependency graph such as slot collisions.
8182                 This is called internally by display() to show the problems _after_
8183                 the merge list where it is most likely to be seen, but if display()
8184                 is not going to be called then this method should be called explicitly
8185                 to ensure that the user is notified of problems with the graph.
8186
8187                 All output goes to stderr, except for unsatisfied dependencies which
8188                 go to stdout for parsing by programs such as autounmask.
8189                 """
8190
8191                 # Note that show_masked_packages() sends it's output to
8192                 # stdout, and some programs such as autounmask parse the
8193                 # output in cases when emerge bails out. However, when
8194                 # show_masked_packages() is called for installed packages
8195                 # here, the message is a warning that is more appropriate
8196                 # to send to stderr, so temporarily redirect stdout to
8197                 # stderr. TODO: Fix output code so there's a cleaner way
8198                 # to redirect everything to stderr.
8199                 sys.stdout.flush()
8200                 sys.stderr.flush()
8201                 stdout = sys.stdout
8202                 try:
8203                         sys.stdout = sys.stderr
8204                         self._display_problems()
8205                 finally:
8206                         sys.stdout = stdout
8207                         sys.stdout.flush()
8208                         sys.stderr.flush()
8209
8210                 # This goes to stdout for parsing by programs like autounmask.
8211                 for pargs, kwargs in self._unsatisfied_deps_for_display:
8212                         self._show_unsatisfied_dep(*pargs, **kwargs)
8213
8214         def _display_problems(self):
8215                 if self._circular_deps_for_display is not None:
8216                         self._show_circular_deps(
8217                                 self._circular_deps_for_display)
8218
8219                 # The user is only notified of a slot conflict if
8220                 # there are no unresolvable blocker conflicts.
8221                 if self._unsatisfied_blockers_for_display is not None:
8222                         self._show_unsatisfied_blockers(
8223                                 self._unsatisfied_blockers_for_display)
8224                 else:
8225                         self._show_slot_collision_notice()
8226
8227                 # TODO: Add generic support for "set problem" handlers so that
8228                 # the below warnings aren't special cases for world only.
8229
8230                 if self._missing_args:
8231                         world_problems = False
8232                         if "world" in self._sets:
8233                                 # Filter out indirect members of world (from nested sets)
8234                                 # since only direct members of world are desired here.
8235                                 world_set = self.roots[self.target_root].sets["world"]
8236                                 for arg, atom in self._missing_args:
8237                                         if arg.name == "world" and atom in world_set:
8238                                                 world_problems = True
8239                                                 break
8240
8241                         if world_problems:
8242                                 sys.stderr.write("\n!!! Problems have been " + \
8243                                         "detected with your world file\n")
8244                                 sys.stderr.write("!!! Please run " + \
8245                                         green("emaint --check world")+"\n\n")
8246
8247                 if self._missing_args:
8248                         sys.stderr.write("\n" + colorize("BAD", "!!!") + \
8249                                 " Ebuilds for the following packages are either all\n")
8250                         sys.stderr.write(colorize("BAD", "!!!") + \
8251                                 " masked or don't exist:\n")
8252                         sys.stderr.write(" ".join(str(atom) for arg, atom in \
8253                                 self._missing_args) + "\n")
8254
8255                 if self._pprovided_args:
8256                         arg_refs = {}
8257                         for arg, atom in self._pprovided_args:
8258                                 if isinstance(arg, SetArg):
8259                                         parent = arg.name
8260                                         arg_atom = (atom, atom)
8261                                 else:
8262                                         parent = "args"
8263                                         arg_atom = (arg.arg, atom)
8264                                 refs = arg_refs.setdefault(arg_atom, [])
8265                                 if parent not in refs:
8266                                         refs.append(parent)
8267                         msg = []
8268                         msg.append(bad("\nWARNING: "))
8269                         if len(self._pprovided_args) > 1:
8270                                 msg.append("Requested packages will not be " + \
8271                                         "merged because they are listed in\n")
8272                         else:
8273                                 msg.append("A requested package will not be " + \
8274                                         "merged because it is listed in\n")
8275                         msg.append("package.provided:\n\n")
8276                         problems_sets = set()
8277                         for (arg, atom), refs in arg_refs.iteritems():
8278                                 ref_string = ""
8279                                 if refs:
8280                                         problems_sets.update(refs)
8281                                         refs.sort()
8282                                         ref_string = ", ".join(["'%s'" % name for name in refs])
8283                                         ref_string = " pulled in by " + ref_string
8284                                 msg.append("  %s%s\n" % (colorize("INFORM", str(arg)), ref_string))
8285                         msg.append("\n")
8286                         if "world" in problems_sets:
8287                                 msg.append("This problem can be solved in one of the following ways:\n\n")
8288                                 msg.append("  A) Use emaint to clean offending packages from world (if not installed).\n")
8289                                 msg.append("  B) Uninstall offending packages (cleans them from world).\n")
8290                                 msg.append("  C) Remove offending entries from package.provided.\n\n")
8291                                 msg.append("The best course of action depends on the reason that an offending\n")
8292                                 msg.append("package.provided entry exists.\n\n")
8293                         sys.stderr.write("".join(msg))
8294
8295                 masked_packages = []
8296                 for pkg in self._masked_installed:
8297                         root_config = pkg.root_config
8298                         pkgsettings = self.pkgsettings[pkg.root]
8299                         mreasons = get_masking_status(pkg, pkgsettings, root_config)
8300                         masked_packages.append((root_config, pkgsettings,
8301                                 pkg.cpv, pkg.metadata, mreasons))
8302                 if masked_packages:
8303                         sys.stderr.write("\n" + colorize("BAD", "!!!") + \
8304                                 " The following installed packages are masked:\n")
8305                         show_masked_packages(masked_packages)
8306                         show_mask_docs()
8307                         print
8308
8309         def calc_changelog(self,ebuildpath,current,next):
8310                 if ebuildpath == None or not os.path.exists(ebuildpath):
8311                         return []
8312                 current = '-'.join(portage.catpkgsplit(current)[1:])
8313                 if current.endswith('-r0'):
8314                         current = current[:-3]
8315                 next = '-'.join(portage.catpkgsplit(next)[1:])
8316                 if next.endswith('-r0'):
8317                         next = next[:-3]
8318                 changelogpath = os.path.join(os.path.split(ebuildpath)[0],'ChangeLog')
8319                 try:
8320                         changelog = open(changelogpath).read()
8321                 except SystemExit, e:
8322                         raise # Needed else can't exit
8323                 except:
8324                         return []
8325                 divisions = self.find_changelog_tags(changelog)
8326                 #print 'XX from',current,'to',next
8327                 #for div,text in divisions: print 'XX',div
8328                 # skip entries for all revisions above the one we are about to emerge
8329                 for i in range(len(divisions)):
8330                         if divisions[i][0]==next:
8331                                 divisions = divisions[i:]
8332                                 break
8333                 # find out how many entries we are going to display
8334                 for i in range(len(divisions)):
8335                         if divisions[i][0]==current:
8336                                 divisions = divisions[:i]
8337                                 break
8338                 else:
8339                     # couldnt find the current revision in the list. display nothing
8340                         return []
8341                 return divisions
8342
8343         def find_changelog_tags(self,changelog):
8344                 divs = []
8345                 release = None
8346                 while 1:
8347                         match = re.search(r'^\*\ ?([-a-zA-Z0-9_.+]*)(?:\ .*)?\n',changelog,re.M)
8348                         if match is None:
8349                                 if release is not None:
8350                                         divs.append((release,changelog))
8351                                 return divs
8352                         if release is not None:
8353                                 divs.append((release,changelog[:match.start()]))
8354                         changelog = changelog[match.end():]
8355                         release = match.group(1)
8356                         if release.endswith('.ebuild'):
8357                                 release = release[:-7]
8358                         if release.endswith('-r0'):
8359                                 release = release[:-3]
8360
8361         def saveNomergeFavorites(self):
8362                 """Find atoms in favorites that are not in the mergelist and add them
8363                 to the world file if necessary."""
8364                 for x in ("--buildpkgonly", "--fetchonly", "--fetch-all-uri",
8365                         "--oneshot", "--onlydeps", "--pretend"):
8366                         if x in self.myopts:
8367                                 return
8368                 root_config = self.roots[self.target_root]
8369                 world_set = root_config.sets["world"]
8370
8371                 world_locked = False
8372                 if hasattr(world_set, "lock"):
8373                         world_set.lock()
8374                         world_locked = True
8375
8376                 if hasattr(world_set, "load"):
8377                         world_set.load() # maybe it's changed on disk
8378
8379                 args_set = self._sets["args"]
8380                 portdb = self.trees[self.target_root]["porttree"].dbapi
8381                 added_favorites = set()
8382                 for x in self._set_nodes:
8383                         pkg_type, root, pkg_key, pkg_status = x
8384                         if pkg_status != "nomerge":
8385                                 continue
8386
8387                         try:
8388                                 myfavkey = create_world_atom(x, args_set, root_config)
8389                                 if myfavkey:
8390                                         if myfavkey in added_favorites:
8391                                                 continue
8392                                         added_favorites.add(myfavkey)
8393                         except portage.exception.InvalidDependString, e:
8394                                 writemsg("\n\n!!! '%s' has invalid PROVIDE: %s\n" % \
8395                                         (pkg_key, str(e)), noiselevel=-1)
8396                                 writemsg("!!! see '%s'\n\n" % os.path.join(
8397                                         root, portage.VDB_PATH, pkg_key, "PROVIDE"), noiselevel=-1)
8398                                 del e
8399                 all_added = []
8400                 for k in self._sets:
8401                         if k in ("args", "world") or not root_config.sets[k].world_candidate:
8402                                 continue
8403                         s = SETPREFIX + k
8404                         if s in world_set:
8405                                 continue
8406                         all_added.append(SETPREFIX + k)
8407                 all_added.extend(added_favorites)
8408                 all_added.sort()
8409                 for a in all_added:
8410                         print ">>> Recording %s in \"world\" favorites file..." % \
8411                                 colorize("INFORM", str(a))
8412                 if all_added:
8413                         world_set.update(all_added)
8414
8415                 if world_locked:
8416                         world_set.unlock()
8417
8418         def loadResumeCommand(self, resume_data, skip_masked=False):
8419                 """
8420                 Add a resume command to the graph and validate it in the process.  This
8421                 will raise a PackageNotFound exception if a package is not available.
8422                 """
8423
8424                 if not isinstance(resume_data, dict):
8425                         return False
8426
8427                 mergelist = resume_data.get("mergelist")
8428                 if not isinstance(mergelist, list):
8429                         mergelist = []
8430
8431                 fakedb = self.mydbapi
8432                 trees = self.trees
8433                 serialized_tasks = []
8434                 masked_tasks = []
8435                 for x in mergelist:
8436                         if not (isinstance(x, list) and len(x) == 4):
8437                                 continue
8438                         pkg_type, myroot, pkg_key, action = x
8439                         if pkg_type not in self.pkg_tree_map:
8440                                 continue
8441                         if action != "merge":
8442                                 continue
8443                         tree_type = self.pkg_tree_map[pkg_type]
8444                         mydb = trees[myroot][tree_type].dbapi
8445                         db_keys = list(self._trees_orig[myroot][
8446                                 tree_type].dbapi._aux_cache_keys)
8447                         try:
8448                                 metadata = izip(db_keys, mydb.aux_get(pkg_key, db_keys))
8449                         except KeyError:
8450                                 # It does no exist or it is corrupt.
8451                                 if action == "uninstall":
8452                                         continue
8453                                 raise portage.exception.PackageNotFound(pkg_key)
8454                         installed = action == "uninstall"
8455                         built = pkg_type != "ebuild"
8456                         root_config = self.roots[myroot]
8457                         pkg = Package(built=built, cpv=pkg_key,
8458                                 installed=installed, metadata=metadata,
8459                                 operation=action, root_config=root_config,
8460                                 type_name=pkg_type)
8461                         if pkg_type == "ebuild":
8462                                 pkgsettings = self.pkgsettings[myroot]
8463                                 pkgsettings.setcpv(pkg)
8464                                 pkg.metadata["USE"] = pkgsettings["PORTAGE_USE"]
8465                         self._pkg_cache[pkg] = pkg
8466
8467                         root_config = self.roots[pkg.root]
8468                         if "merge" == pkg.operation and \
8469                                 not visible(root_config.settings, pkg):
8470                                 if skip_masked:
8471                                         masked_tasks.append(Dependency(root=pkg.root, parent=pkg))
8472                                 else:
8473                                         self._unsatisfied_deps_for_display.append(
8474                                                 ((pkg.root, "="+pkg.cpv), {"myparent":None}))
8475
8476                         fakedb[myroot].cpv_inject(pkg)
8477                         serialized_tasks.append(pkg)
8478                         self.spinner.update()
8479
8480                 if self._unsatisfied_deps_for_display:
8481                         return False
8482
8483                 if not serialized_tasks or "--nodeps" in self.myopts:
8484                         self._serialized_tasks_cache = serialized_tasks
8485                         self._scheduler_graph = self.digraph
8486                 else:
8487                         self._select_package = self._select_pkg_from_graph
8488                         self.myparams.add("selective")
8489
8490                         favorites = resume_data.get("favorites")
8491                         args_set = self._sets["args"]
8492                         if isinstance(favorites, list):
8493                                 args = self._load_favorites(favorites)
8494                         else:
8495                                 args = []
8496
8497                         for task in serialized_tasks:
8498                                 if isinstance(task, Package) and \
8499                                         task.operation == "merge":
8500                                         if not self._add_pkg(task, None):
8501                                                 return False
8502
8503                         # Packages for argument atoms need to be explicitly
8504                         # added via _add_pkg() so that they are included in the
8505                         # digraph (needed at least for --tree display).
8506                         for arg in args:
8507                                 for atom in arg.set:
8508                                         pkg, existing_node = self._select_package(
8509                                                 arg.root_config.root, atom)
8510                                         if existing_node is None and \
8511                                                 pkg is not None:
8512                                                 if not self._add_pkg(pkg, Dependency(atom=atom,
8513                                                         root=pkg.root, parent=arg)):
8514                                                         return False
8515
8516                         # Allow unsatisfied deps here to avoid showing a masking
8517                         # message for an unsatisfied dep that isn't necessarily
8518                         # masked.
8519                         if not self._create_graph(allow_unsatisfied=True):
8520                                 return False
8521                         if masked_tasks or self._unsatisfied_deps:
8522                                 # This probably means that a required package
8523                                 # was dropped via --skipfirst. It makes the
8524                                 # resume list invalid, so convert it to a
8525                                 # UnsatisfiedResumeDep exception.
8526                                 raise self.UnsatisfiedResumeDep(self,
8527                                         masked_tasks + self._unsatisfied_deps)
8528                         self._serialized_tasks_cache = None
8529                         try:
8530                                 self.altlist()
8531                         except self._unknown_internal_error:
8532                                 return False
8533
8534                 return True
8535
8536         def _load_favorites(self, favorites):
8537                 """
8538                 Use a list of favorites to resume state from a
8539                 previous select_files() call. This creates similar
8540                 DependencyArg instances to those that would have
8541                 been created by the original select_files() call.
8542                 This allows Package instances to be matched with
8543                 DependencyArg instances during graph creation.
8544                 """
8545                 root_config = self.roots[self.target_root]
8546                 getSetAtoms = root_config.setconfig.getSetAtoms
8547                 sets = root_config.sets
8548                 args = []
8549                 for x in favorites:
8550                         if not isinstance(x, basestring):
8551                                 continue
8552                         if x in ("system", "world"):
8553                                 x = SETPREFIX + x
8554                         if x.startswith(SETPREFIX):
8555                                 s = x[len(SETPREFIX):]
8556                                 if s not in sets:
8557                                         continue
8558                                 if s in self._sets:
8559                                         continue
8560                                 # Recursively expand sets so that containment tests in
8561                                 # self._get_parent_sets() properly match atoms in nested
8562                                 # sets (like if world contains system).
8563                                 expanded_set = InternalPackageSet(
8564                                         initial_atoms=getSetAtoms(s))
8565                                 self._sets[s] = expanded_set
8566                                 args.append(SetArg(arg=x, set=expanded_set,
8567                                         root_config=root_config))
8568                         else:
8569                                 if not portage.isvalidatom(x):
8570                                         continue
8571                                 args.append(AtomArg(arg=x, atom=x,
8572                                         root_config=root_config))
8573
8574                 # Create the "args" package set from atoms and
8575                 # packages given as arguments.
8576                 args_set = self._sets["args"]
8577                 for arg in args:
8578                         if not isinstance(arg, (AtomArg, PackageArg)):
8579                                 continue
8580                         myatom = arg.atom
8581                         if myatom in args_set:
8582                                 continue
8583                         args_set.add(myatom)
8584                 self._set_atoms.update(chain(*self._sets.itervalues()))
8585                 atom_arg_map = self._atom_arg_map
8586                 for arg in args:
8587                         for atom in arg.set:
8588                                 atom_key = (atom, arg.root_config.root)
8589                                 refs = atom_arg_map.get(atom_key)
8590                                 if refs is None:
8591                                         refs = []
8592                                         atom_arg_map[atom_key] = refs
8593                                         if arg not in refs:
8594                                                 refs.append(arg)
8595                 return args
8596
8597         class UnsatisfiedResumeDep(portage.exception.PortageException):
8598                 """
8599                 A dependency of a resume list is not installed. This
8600                 can occur when a required package is dropped from the
8601                 merge list via --skipfirst.
8602                 """
8603                 def __init__(self, depgraph, value):
8604                         portage.exception.PortageException.__init__(self, value)
8605                         self.depgraph = depgraph
8606
8607         class _internal_exception(portage.exception.PortageException):
8608                 def __init__(self, value=""):
8609                         portage.exception.PortageException.__init__(self, value)
8610
8611         class _unknown_internal_error(_internal_exception):
8612                 """
8613                 Used by the depgraph internally to terminate graph creation.
8614                 The specific reason for the failure should have been dumped
8615                 to stderr, unfortunately, the exact reason for the failure
8616                 may not be known.
8617                 """
8618
8619         class _serialize_tasks_retry(_internal_exception):
8620                 """
8621                 This is raised by the _serialize_tasks() method when it needs to
8622                 be called again for some reason. The only case that it's currently
8623                 used for is when neglected dependencies need to be added to the
8624                 graph in order to avoid making a potentially unsafe decision.
8625                 """
8626
8627         class _dep_check_composite_db(portage.dbapi):
8628                 """
8629                 A dbapi-like interface that is optimized for use in dep_check() calls.
8630                 This is built on top of the existing depgraph package selection logic.
8631                 Some packages that have been added to the graph may be masked from this
8632                 view in order to influence the atom preference selection that occurs
8633                 via dep_check().
8634                 """
8635                 def __init__(self, depgraph, root):
8636                         portage.dbapi.__init__(self)
8637                         self._depgraph = depgraph
8638                         self._root = root
8639                         self._match_cache = {}
8640                         self._cpv_pkg_map = {}
8641
8642                 def match(self, atom):
8643                         ret = self._match_cache.get(atom)
8644                         if ret is not None:
8645                                 return ret[:]
8646                         orig_atom = atom
8647                         if "/" not in atom:
8648                                 atom = self._dep_expand(atom)
8649                         pkg, existing = self._depgraph._select_package(self._root, atom)
8650                         if not pkg:
8651                                 ret = []
8652                         else:
8653                                 # Return the highest available from select_package() as well as
8654                                 # any matching slots in the graph db.
8655                                 slots = set()
8656                                 slots.add(pkg.metadata["SLOT"])
8657                                 atom_cp = portage.dep_getkey(atom)
8658                                 if pkg.cp.startswith("virtual/"):
8659                                         # For new-style virtual lookahead that occurs inside
8660                                         # dep_check(), examine all slots. This is needed
8661                                         # so that newer slots will not unnecessarily be pulled in
8662                                         # when a satisfying lower slot is already installed. For
8663                                         # example, if virtual/jdk-1.4 is satisfied via kaffe then
8664                                         # there's no need to pull in a newer slot to satisfy a
8665                                         # virtual/jdk dependency.
8666                                         for db, pkg_type, built, installed, db_keys in \
8667                                                 self._depgraph._filtered_trees[self._root]["dbs"]:
8668                                                 for cpv in db.match(atom):
8669                                                         if portage.cpv_getkey(cpv) != pkg.cp:
8670                                                                 continue
8671                                                         slots.add(db.aux_get(cpv, ["SLOT"])[0])
8672                                 ret = []
8673                                 if self._visible(pkg):
8674                                         self._cpv_pkg_map[pkg.cpv] = pkg
8675                                         ret.append(pkg.cpv)
8676                                 slots.remove(pkg.metadata["SLOT"])
8677                                 while slots:
8678                                         slot_atom = "%s:%s" % (atom_cp, slots.pop())
8679                                         pkg, existing = self._depgraph._select_package(
8680                                                 self._root, slot_atom)
8681                                         if not pkg:
8682                                                 continue
8683                                         if not self._visible(pkg):
8684                                                 continue
8685                                         self._cpv_pkg_map[pkg.cpv] = pkg
8686                                         ret.append(pkg.cpv)
8687                                 if ret:
8688                                         self._cpv_sort_ascending(ret)
8689                         self._match_cache[orig_atom] = ret
8690                         return ret[:]
8691
8692                 def _visible(self, pkg):
8693                         if pkg.installed and "selective" not in self._depgraph.myparams:
8694                                 try:
8695                                         arg = self._depgraph._iter_atoms_for_pkg(pkg).next()
8696                                 except (StopIteration, portage.exception.InvalidDependString):
8697                                         arg = None
8698                                 if arg:
8699                                         return False
8700                         if pkg.installed:
8701                                 try:
8702                                         if not visible(
8703                                                 self._depgraph.pkgsettings[pkg.root], pkg):
8704                                                 return False
8705                                 except portage.exception.InvalidDependString:
8706                                         pass
8707                         return True
8708
8709                 def _dep_expand(self, atom):
8710                         """
8711                         This is only needed for old installed packages that may
8712                         contain atoms that are not fully qualified with a specific
8713                         category. Emulate the cpv_expand() function that's used by
8714                         dbapi.match() in cases like this. If there are multiple
8715                         matches, it's often due to a new-style virtual that has
8716                         been added, so try to filter those out to avoid raising
8717                         a ValueError.
8718                         """
8719                         root_config = self._depgraph.roots[self._root]
8720                         orig_atom = atom
8721                         expanded_atoms = self._depgraph._dep_expand(root_config, atom)
8722                         if len(expanded_atoms) > 1:
8723                                 non_virtual_atoms = []
8724                                 for x in expanded_atoms:
8725                                         if not portage.dep_getkey(x).startswith("virtual/"):
8726                                                 non_virtual_atoms.append(x)
8727                                 if len(non_virtual_atoms) == 1:
8728                                         expanded_atoms = non_virtual_atoms
8729                         if len(expanded_atoms) > 1:
8730                                 # compatible with portage.cpv_expand()
8731                                 raise portage.exception.AmbiguousPackageName(
8732                                         [portage.dep_getkey(x) for x in expanded_atoms])
8733                         if expanded_atoms:
8734                                 atom = expanded_atoms[0]
8735                         else:
8736                                 null_atom = insert_category_into_atom(atom, "null")
8737                                 null_cp = portage.dep_getkey(null_atom)
8738                                 cat, atom_pn = portage.catsplit(null_cp)
8739                                 virts_p = root_config.settings.get_virts_p().get(atom_pn)
8740                                 if virts_p:
8741                                         # Allow the resolver to choose which virtual.
8742                                         atom = insert_category_into_atom(atom, "virtual")
8743                                 else:
8744                                         atom = insert_category_into_atom(atom, "null")
8745                         return atom
8746
8747                 def aux_get(self, cpv, wants):
8748                         metadata = self._cpv_pkg_map[cpv].metadata
8749                         return [metadata.get(x, "") for x in wants]
8750
8751 class RepoDisplay(object):
8752         def __init__(self, roots):
8753                 self._shown_repos = {}
8754                 self._unknown_repo = False
8755                 repo_paths = set()
8756                 for root_config in roots.itervalues():
8757                         portdir = root_config.settings.get("PORTDIR")
8758                         if portdir:
8759                                 repo_paths.add(portdir)
8760                         overlays = root_config.settings.get("PORTDIR_OVERLAY")
8761                         if overlays:
8762                                 repo_paths.update(overlays.split())
8763                 repo_paths = list(repo_paths)
8764                 self._repo_paths = repo_paths
8765                 self._repo_paths_real = [ os.path.realpath(repo_path) \
8766                         for repo_path in repo_paths ]
8767
8768                 # pre-allocate index for PORTDIR so that it always has index 0.
8769                 for root_config in roots.itervalues():
8770                         portdb = root_config.trees["porttree"].dbapi
8771                         portdir = portdb.porttree_root
8772                         if portdir:
8773                                 self.repoStr(portdir)
8774
8775         def repoStr(self, repo_path_real):
8776                 real_index = -1
8777                 if repo_path_real:
8778                         real_index = self._repo_paths_real.index(repo_path_real)
8779                 if real_index == -1:
8780                         s = "?"
8781                         self._unknown_repo = True
8782                 else:
8783                         shown_repos = self._shown_repos
8784                         repo_paths = self._repo_paths
8785                         repo_path = repo_paths[real_index]
8786                         index = shown_repos.get(repo_path)
8787                         if index is None:
8788                                 index = len(shown_repos)
8789                                 shown_repos[repo_path] = index
8790                         s = str(index)
8791                 return s
8792
8793         def __str__(self):
8794                 output = []
8795                 shown_repos = self._shown_repos
8796                 unknown_repo = self._unknown_repo
8797                 if shown_repos or self._unknown_repo:
8798                         output.append("Portage tree and overlays:\n")
8799                 show_repo_paths = list(shown_repos)
8800                 for repo_path, repo_index in shown_repos.iteritems():
8801                         show_repo_paths[repo_index] = repo_path
8802                 if show_repo_paths:
8803                         for index, repo_path in enumerate(show_repo_paths):
8804                                 output.append(" "+teal("["+str(index)+"]")+" %s\n" % repo_path)
8805                 if unknown_repo:
8806                         output.append(" "+teal("[?]") + \
8807                                 " indicates that the source repository could not be determined\n")
8808                 return "".join(output)
8809
8810 class PackageCounters(object):
8811
8812         def __init__(self):
8813                 self.upgrades   = 0
8814                 self.downgrades = 0
8815                 self.new        = 0
8816                 self.newslot    = 0
8817                 self.reinst     = 0
8818                 self.uninst     = 0
8819                 self.blocks     = 0
8820                 self.blocks_satisfied         = 0
8821                 self.totalsize  = 0
8822                 self.restrict_fetch           = 0
8823                 self.restrict_fetch_satisfied = 0
8824                 self.interactive              = 0
8825
8826         def __str__(self):
8827                 total_installs = self.upgrades + self.downgrades + self.newslot + self.new + self.reinst
8828                 myoutput = []
8829                 details = []
8830                 myoutput.append("Total: %s package" % total_installs)
8831                 if total_installs != 1:
8832                         myoutput.append("s")
8833                 if total_installs != 0:
8834                         myoutput.append(" (")
8835                 if self.upgrades > 0:
8836                         details.append("%s upgrade" % self.upgrades)
8837                         if self.upgrades > 1:
8838                                 details[-1] += "s"
8839                 if self.downgrades > 0:
8840                         details.append("%s downgrade" % self.downgrades)
8841                         if self.downgrades > 1:
8842                                 details[-1] += "s"
8843                 if self.new > 0:
8844                         details.append("%s new" % self.new)
8845                 if self.newslot > 0:
8846                         details.append("%s in new slot" % self.newslot)
8847                         if self.newslot > 1:
8848                                 details[-1] += "s"
8849                 if self.reinst > 0:
8850                         details.append("%s reinstall" % self.reinst)
8851                         if self.reinst > 1:
8852                                 details[-1] += "s"
8853                 if self.uninst > 0:
8854                         details.append("%s uninstall" % self.uninst)
8855                         if self.uninst > 1:
8856                                 details[-1] += "s"
8857                 if self.interactive > 0:
8858                         details.append("%s %s" % (self.interactive,
8859                                 colorize("WARN", "interactive")))
8860                 myoutput.append(", ".join(details))
8861                 if total_installs != 0:
8862                         myoutput.append(")")
8863                 myoutput.append(", Size of downloads: %s" % format_size(self.totalsize))
8864                 if self.restrict_fetch:
8865                         myoutput.append("\nFetch Restriction: %s package" % \
8866                                 self.restrict_fetch)
8867                         if self.restrict_fetch > 1:
8868                                 myoutput.append("s")
8869                 if self.restrict_fetch_satisfied < self.restrict_fetch:
8870                         myoutput.append(bad(" (%s unsatisfied)") % \
8871                                 (self.restrict_fetch - self.restrict_fetch_satisfied))
8872                 if self.blocks > 0:
8873                         myoutput.append("\nConflict: %s block" % \
8874                                 self.blocks)
8875                         if self.blocks > 1:
8876                                 myoutput.append("s")
8877                         if self.blocks_satisfied < self.blocks:
8878                                 myoutput.append(bad(" (%s unsatisfied)") % \
8879                                         (self.blocks - self.blocks_satisfied))
8880                 return "".join(myoutput)
8881
8882 class PollSelectAdapter(PollConstants):
8883
8884         """
8885         Use select to emulate a poll object, for
8886         systems that don't support poll().
8887         """
8888
8889         def __init__(self):
8890                 self._registered = {}
8891                 self._select_args = [[], [], []]
8892
8893         def register(self, fd, *args):
8894                 """
8895                 Only POLLIN is currently supported!
8896                 """
8897                 if len(args) > 1:
8898                         raise TypeError(
8899                                 "register expected at most 2 arguments, got " + \
8900                                 repr(1 + len(args)))
8901
8902                 eventmask = PollConstants.POLLIN | \
8903                         PollConstants.POLLPRI | PollConstants.POLLOUT
8904                 if args:
8905                         eventmask = args[0]
8906
8907                 self._registered[fd] = eventmask
8908                 self._select_args = None
8909
8910         def unregister(self, fd):
8911                 self._select_args = None
8912                 del self._registered[fd]
8913
8914         def poll(self, *args):
8915                 if len(args) > 1:
8916                         raise TypeError(
8917                                 "poll expected at most 2 arguments, got " + \
8918                                 repr(1 + len(args)))
8919
8920                 timeout = None
8921                 if args:
8922                         timeout = args[0]
8923
8924                 select_args = self._select_args
8925                 if select_args is None:
8926                         select_args = [self._registered.keys(), [], []]
8927
8928                 if timeout is not None:
8929                         select_args = select_args[:]
8930                         # Translate poll() timeout args to select() timeout args:
8931                         #
8932                         #          | units        | value(s) for indefinite block
8933                         # ---------|--------------|------------------------------
8934                         #   poll   | milliseconds | omitted, negative, or None
8935                         # ---------|--------------|------------------------------
8936                         #   select | seconds      | omitted
8937                         # ---------|--------------|------------------------------
8938
8939                         if timeout is not None and timeout < 0:
8940                                 timeout = None
8941                         if timeout is not None:
8942                                 select_args.append(timeout / 1000)
8943
8944                 select_events = select.select(*select_args)
8945                 poll_events = []
8946                 for fd in select_events[0]:
8947                         poll_events.append((fd, PollConstants.POLLIN))
8948                 return poll_events
8949
8950 class SequentialTaskQueue(SlotObject):
8951
8952         __slots__ = ("max_jobs", "running_tasks") + \
8953                 ("_dirty", "_scheduling", "_task_queue")
8954
8955         def __init__(self, **kwargs):
8956                 SlotObject.__init__(self, **kwargs)
8957                 self._task_queue = deque()
8958                 self.running_tasks = set()
8959                 if self.max_jobs is None:
8960                         self.max_jobs = 1
8961                 self._dirty = True
8962
8963         def add(self, task):
8964                 self._task_queue.append(task)
8965                 self._dirty = True
8966
8967         def addFront(self, task):
8968                 self._task_queue.appendleft(task)
8969                 self._dirty = True
8970
8971         def schedule(self):
8972
8973                 if not self._dirty:
8974                         return False
8975
8976                 if not self:
8977                         return False
8978
8979                 if self._scheduling:
8980                         # Ignore any recursive schedule() calls triggered via
8981                         # self._task_exit().
8982                         return False
8983
8984                 self._scheduling = True
8985
8986                 task_queue = self._task_queue
8987                 running_tasks = self.running_tasks
8988                 max_jobs = self.max_jobs
8989                 state_changed = False
8990
8991                 while task_queue and \
8992                         (max_jobs is True or len(running_tasks) < max_jobs):
8993                         task = task_queue.popleft()
8994                         cancelled = getattr(task, "cancelled", None)
8995                         if not cancelled:
8996                                 running_tasks.add(task)
8997                                 task.addExitListener(self._task_exit)
8998                                 task.start()
8999                         state_changed = True
9000
9001                 self._dirty = False
9002                 self._scheduling = False
9003
9004                 return state_changed
9005
9006         def _task_exit(self, task):
9007                 """
9008                 Since we can always rely on exit listeners being called, the set of
9009                 running tasks is always pruned automatically and there is never any need
9010                 to actively prune it.
9011                 """
9012                 self.running_tasks.remove(task)
9013                 if self._task_queue:
9014                         self._dirty = True
9015
9016         def clear(self):
9017                 self._task_queue.clear()
9018                 running_tasks = self.running_tasks
9019                 while running_tasks:
9020                         task = running_tasks.pop()
9021                         task.removeExitListener(self._task_exit)
9022                         task.cancel()
9023                 self._dirty = False
9024
9025         def __nonzero__(self):
9026                 return bool(self._task_queue or self.running_tasks)
9027
9028         def __len__(self):
9029                 return len(self._task_queue) + len(self.running_tasks)
9030
9031 _can_poll_device = None
9032
9033 def can_poll_device():
9034         """
9035         Test if it's possible to use poll() on a device such as a pty. This
9036         is known to fail on Darwin.
9037         @rtype: bool
9038         @returns: True if poll() on a device succeeds, False otherwise.
9039         """
9040
9041         global _can_poll_device
9042         if _can_poll_device is not None:
9043                 return _can_poll_device
9044
9045         if not hasattr(select, "poll"):
9046                 _can_poll_device = False
9047                 return _can_poll_device
9048
9049         try:
9050                 dev_null = open('/dev/null', 'rb')
9051         except IOError:
9052                 _can_poll_device = False
9053                 return _can_poll_device
9054
9055         p = select.poll()
9056         p.register(dev_null.fileno(), PollConstants.POLLIN)
9057
9058         invalid_request = False
9059         for f, event in p.poll():
9060                 if event & PollConstants.POLLNVAL:
9061                         invalid_request = True
9062                         break
9063         dev_null.close()
9064
9065         _can_poll_device = not invalid_request
9066         return _can_poll_device
9067
9068 def create_poll_instance():
9069         """
9070         Create an instance of select.poll, or an instance of
9071         PollSelectAdapter there is no poll() implementation or
9072         it is broken somehow.
9073         """
9074         if can_poll_device():
9075                 return select.poll()
9076         return PollSelectAdapter()
9077
9078 getloadavg = getattr(os, "getloadavg", None)
9079 if getloadavg is None:
9080         def getloadavg():
9081                 """
9082                 Uses /proc/loadavg to emulate os.getloadavg().
9083                 Raises OSError if the load average was unobtainable.
9084                 """
9085                 try:
9086                         loadavg_str = open('/proc/loadavg').readline()
9087                 except IOError:
9088                         # getloadavg() is only supposed to raise OSError, so convert
9089                         raise OSError('unknown')
9090                 loadavg_split = loadavg_str.split()
9091                 if len(loadavg_split) < 3:
9092                         raise OSError('unknown')
9093                 loadavg_floats = []
9094                 for i in xrange(3):
9095                         try:
9096                                 loadavg_floats.append(float(loadavg_split[i]))
9097                         except ValueError:
9098                                 raise OSError('unknown')
9099                 return tuple(loadavg_floats)
9100
9101 class PollScheduler(object):
9102
9103         class _sched_iface_class(SlotObject):
9104                 __slots__ = ("register", "schedule", "unregister")
9105
9106         def __init__(self):
9107                 self._max_jobs = 1
9108                 self._max_load = None
9109                 self._jobs = 0
9110                 self._poll_event_queue = []
9111                 self._poll_event_handlers = {}
9112                 self._poll_event_handler_ids = {}
9113                 # Increment id for each new handler.
9114                 self._event_handler_id = 0
9115                 self._poll_obj = create_poll_instance()
9116                 self._scheduling = False
9117
9118         def _schedule(self):
9119                 """
9120                 Calls _schedule_tasks() and automatically returns early from
9121                 any recursive calls to this method that the _schedule_tasks()
9122                 call might trigger. This makes _schedule() safe to call from
9123                 inside exit listeners.
9124                 """
9125                 if self._scheduling:
9126                         return False
9127                 self._scheduling = True
9128                 try:
9129                         return self._schedule_tasks()
9130                 finally:
9131                         self._scheduling = False
9132
9133         def _running_job_count(self):
9134                 return self._jobs
9135
9136         def _can_add_job(self):
9137                 max_jobs = self._max_jobs
9138                 max_load = self._max_load
9139
9140                 if self._max_jobs is not True and \
9141                         self._running_job_count() >= self._max_jobs:
9142                         return False
9143
9144                 if max_load is not None and \
9145                         (max_jobs is True or max_jobs > 1) and \
9146                         self._running_job_count() >= 1:
9147                         try:
9148                                 avg1, avg5, avg15 = getloadavg()
9149                         except OSError:
9150                                 return False
9151
9152                         if avg1 >= max_load:
9153                                 return False
9154
9155                 return True
9156
9157         def _poll(self, timeout=None):
9158                 """
9159                 All poll() calls pass through here. The poll events
9160                 are added directly to self._poll_event_queue.
9161                 In order to avoid endless blocking, this raises
9162                 StopIteration if timeout is None and there are
9163                 no file descriptors to poll.
9164                 """
9165                 if not self._poll_event_handlers:
9166                         self._schedule()
9167                         if timeout is None and \
9168                                 not self._poll_event_handlers:
9169                                 raise StopIteration(
9170                                         "timeout is None and there are no poll() event handlers")
9171
9172                 # The following error is known to occur with Linux kernel versions
9173                 # less than 2.6.24:
9174                 #
9175                 #   select.error: (4, 'Interrupted system call')
9176                 #
9177                 # This error has been observed after a SIGSTOP, followed by SIGCONT.
9178                 # Treat it similar to EAGAIN if timeout is None, otherwise just return
9179                 # without any events.
9180                 while True:
9181                         try:
9182                                 self._poll_event_queue.extend(self._poll_obj.poll(timeout))
9183                                 break
9184                         except select.error, e:
9185                                 writemsg_level("\n!!! select error: %s\n" % (e,),
9186                                         level=logging.ERROR, noiselevel=-1)
9187                                 del e
9188                                 if timeout is not None:
9189                                         break
9190
9191         def _next_poll_event(self, timeout=None):
9192                 """
9193                 Since the _schedule_wait() loop is called by event
9194                 handlers from _poll_loop(), maintain a central event
9195                 queue for both of them to share events from a single
9196                 poll() call. In order to avoid endless blocking, this
9197                 raises StopIteration if timeout is None and there are
9198                 no file descriptors to poll.
9199                 """
9200                 if not self._poll_event_queue:
9201                         self._poll(timeout)
9202                 return self._poll_event_queue.pop()
9203
9204         def _poll_loop(self):
9205
9206                 event_handlers = self._poll_event_handlers
9207                 event_handled = False
9208
9209                 try:
9210                         while event_handlers:
9211                                 f, event = self._next_poll_event()
9212                                 handler, reg_id = event_handlers[f]
9213                                 handler(f, event)
9214                                 event_handled = True
9215                 except StopIteration:
9216                         event_handled = True
9217
9218                 if not event_handled:
9219                         raise AssertionError("tight loop")
9220
9221         def _schedule_yield(self):
9222                 """
9223                 Schedule for a short period of time chosen by the scheduler based
9224                 on internal state. Synchronous tasks should call this periodically
9225                 in order to allow the scheduler to service pending poll events. The
9226                 scheduler will call poll() exactly once, without blocking, and any
9227                 resulting poll events will be serviced.
9228                 """
9229                 event_handlers = self._poll_event_handlers
9230                 events_handled = 0
9231
9232                 if not event_handlers:
9233                         return bool(events_handled)
9234
9235                 if not self._poll_event_queue:
9236                         self._poll(0)
9237
9238                 try:
9239                         while event_handlers and self._poll_event_queue:
9240                                 f, event = self._next_poll_event()
9241                                 handler, reg_id = event_handlers[f]
9242                                 handler(f, event)
9243                                 events_handled += 1
9244                 except StopIteration:
9245                         events_handled += 1
9246
9247                 return bool(events_handled)
9248
9249         def _register(self, f, eventmask, handler):
9250                 """
9251                 @rtype: Integer
9252                 @return: A unique registration id, for use in schedule() or
9253                         unregister() calls.
9254                 """
9255                 if f in self._poll_event_handlers:
9256                         raise AssertionError("fd %d is already registered" % f)
9257                 self._event_handler_id += 1
9258                 reg_id = self._event_handler_id
9259                 self._poll_event_handler_ids[reg_id] = f
9260                 self._poll_event_handlers[f] = (handler, reg_id)
9261                 self._poll_obj.register(f, eventmask)
9262                 return reg_id
9263
9264         def _unregister(self, reg_id):
9265                 f = self._poll_event_handler_ids[reg_id]
9266                 self._poll_obj.unregister(f)
9267                 del self._poll_event_handlers[f]
9268                 del self._poll_event_handler_ids[reg_id]
9269
9270         def _schedule_wait(self, wait_ids):
9271                 """
9272                 Schedule until wait_id is not longer registered
9273                 for poll() events.
9274                 @type wait_id: int
9275                 @param wait_id: a task id to wait for
9276                 """
9277                 event_handlers = self._poll_event_handlers
9278                 handler_ids = self._poll_event_handler_ids
9279                 event_handled = False
9280
9281                 if isinstance(wait_ids, int):
9282                         wait_ids = frozenset([wait_ids])
9283
9284                 try:
9285                         while wait_ids.intersection(handler_ids):
9286                                 f, event = self._next_poll_event()
9287                                 handler, reg_id = event_handlers[f]
9288                                 handler(f, event)
9289                                 event_handled = True
9290                 except StopIteration:
9291                         event_handled = True
9292
9293                 return event_handled
9294
9295 class QueueScheduler(PollScheduler):
9296
9297         """
9298         Add instances of SequentialTaskQueue and then call run(). The
9299         run() method returns when no tasks remain.
9300         """
9301
9302         def __init__(self, max_jobs=None, max_load=None):
9303                 PollScheduler.__init__(self)
9304
9305                 if max_jobs is None:
9306                         max_jobs = 1
9307
9308                 self._max_jobs = max_jobs
9309                 self._max_load = max_load
9310                 self.sched_iface = self._sched_iface_class(
9311                         register=self._register,
9312                         schedule=self._schedule_wait,
9313                         unregister=self._unregister)
9314
9315                 self._queues = []
9316                 self._schedule_listeners = []
9317
9318         def add(self, q):
9319                 self._queues.append(q)
9320
9321         def remove(self, q):
9322                 self._queues.remove(q)
9323
9324         def run(self):
9325
9326                 while self._schedule():
9327                         self._poll_loop()
9328
9329                 while self._running_job_count():
9330                         self._poll_loop()
9331
9332         def _schedule_tasks(self):
9333                 """
9334                 @rtype: bool
9335                 @returns: True if there may be remaining tasks to schedule,
9336                         False otherwise.
9337                 """
9338                 while self._can_add_job():
9339                         n = self._max_jobs - self._running_job_count()
9340                         if n < 1:
9341                                 break
9342
9343                         if not self._start_next_job(n):
9344                                 return False
9345
9346                 for q in self._queues:
9347                         if q:
9348                                 return True
9349                 return False
9350
9351         def _running_job_count(self):
9352                 job_count = 0
9353                 for q in self._queues:
9354                         job_count += len(q.running_tasks)
9355                 self._jobs = job_count
9356                 return job_count
9357
9358         def _start_next_job(self, n=1):
9359                 started_count = 0
9360                 for q in self._queues:
9361                         initial_job_count = len(q.running_tasks)
9362                         q.schedule()
9363                         final_job_count = len(q.running_tasks)
9364                         if final_job_count > initial_job_count:
9365                                 started_count += (final_job_count - initial_job_count)
9366                         if started_count >= n:
9367                                 break
9368                 return started_count
9369
9370 class TaskScheduler(object):
9371
9372         """
9373         A simple way to handle scheduling of AsynchrousTask instances. Simply
9374         add tasks and call run(). The run() method returns when no tasks remain.
9375         """
9376
9377         def __init__(self, max_jobs=None, max_load=None):
9378                 self._queue = SequentialTaskQueue(max_jobs=max_jobs)
9379                 self._scheduler = QueueScheduler(
9380                         max_jobs=max_jobs, max_load=max_load)
9381                 self.sched_iface = self._scheduler.sched_iface
9382                 self.run = self._scheduler.run
9383                 self._scheduler.add(self._queue)
9384
9385         def add(self, task):
9386                 self._queue.add(task)
9387
9388 class JobStatusDisplay(object):
9389
9390         _bound_properties = ("curval", "failed", "running")
9391         _jobs_column_width = 48
9392
9393         # Don't update the display unless at least this much
9394         # time has passed, in units of seconds.
9395         _min_display_latency = 2
9396
9397         _default_term_codes = {
9398                 'cr'  : '\r',
9399                 'el'  : '\x1b[K',
9400                 'nel' : '\n',
9401         }
9402
9403         _termcap_name_map = {
9404                 'carriage_return' : 'cr',
9405                 'clr_eol'         : 'el',
9406                 'newline'         : 'nel',
9407         }
9408
9409         def __init__(self, out=sys.stdout, quiet=False):
9410                 object.__setattr__(self, "out", out)
9411                 object.__setattr__(self, "quiet", quiet)
9412                 object.__setattr__(self, "maxval", 0)
9413                 object.__setattr__(self, "merges", 0)
9414                 object.__setattr__(self, "_changed", False)
9415                 object.__setattr__(self, "_displayed", False)
9416                 object.__setattr__(self, "_last_display_time", 0)
9417                 object.__setattr__(self, "width", 80)
9418                 self.reset()
9419
9420                 isatty = hasattr(out, "isatty") and out.isatty()
9421                 object.__setattr__(self, "_isatty", isatty)
9422                 if not isatty or not self._init_term():
9423                         term_codes = {}
9424                         for k, capname in self._termcap_name_map.iteritems():
9425                                 term_codes[k] = self._default_term_codes[capname]
9426                         object.__setattr__(self, "_term_codes", term_codes)
9427
9428         def _init_term(self):
9429                 """
9430                 Initialize term control codes.
9431                 @rtype: bool
9432                 @returns: True if term codes were successfully initialized,
9433                         False otherwise.
9434                 """
9435
9436                 term_type = os.environ.get("TERM", "vt100")
9437                 tigetstr = None
9438
9439                 try:
9440                         import curses
9441                         try:
9442                                 curses.setupterm(term_type, self.out.fileno())
9443                                 tigetstr = curses.tigetstr
9444                         except curses.error:
9445                                 pass
9446                 except ImportError:
9447                         pass
9448
9449                 if tigetstr is None:
9450                         return False
9451
9452                 term_codes = {}
9453                 for k, capname in self._termcap_name_map.iteritems():
9454                         code = tigetstr(capname)
9455                         if code is None:
9456                                 code = self._default_term_codes[capname]
9457                         term_codes[k] = code
9458                 object.__setattr__(self, "_term_codes", term_codes)
9459                 return True
9460
9461         def _format_msg(self, msg):
9462                 return ">>> %s" % msg
9463
9464         def _erase(self):
9465                 self.out.write(
9466                         self._term_codes['carriage_return'] + \
9467                         self._term_codes['clr_eol'])
9468                 self.out.flush()
9469                 self._displayed = False
9470
9471         def _display(self, line):
9472                 self.out.write(line)
9473                 self.out.flush()
9474                 self._displayed = True
9475
9476         def _update(self, msg):
9477
9478                 out = self.out
9479                 if not self._isatty:
9480                         out.write(self._format_msg(msg) + self._term_codes['newline'])
9481                         self.out.flush()
9482                         self._displayed = True
9483                         return
9484
9485                 if self._displayed:
9486                         self._erase()
9487
9488                 self._display(self._format_msg(msg))
9489
9490         def displayMessage(self, msg):
9491
9492                 was_displayed = self._displayed
9493
9494                 if self._isatty and self._displayed:
9495                         self._erase()
9496
9497                 self.out.write(self._format_msg(msg) + self._term_codes['newline'])
9498                 self.out.flush()
9499                 self._displayed = False
9500
9501                 if was_displayed:
9502                         self._changed = True
9503                         self.display()
9504
9505         def reset(self):
9506                 self.maxval = 0
9507                 self.merges = 0
9508                 for name in self._bound_properties:
9509                         object.__setattr__(self, name, 0)
9510
9511                 if self._displayed:
9512                         self.out.write(self._term_codes['newline'])
9513                         self.out.flush()
9514                         self._displayed = False
9515
9516         def __setattr__(self, name, value):
9517                 old_value = getattr(self, name)
9518                 if value == old_value:
9519                         return
9520                 object.__setattr__(self, name, value)
9521                 if name in self._bound_properties:
9522                         self._property_change(name, old_value, value)
9523
9524         def _property_change(self, name, old_value, new_value):
9525                 self._changed = True
9526                 self.display()
9527
9528         def _load_avg_str(self):
9529                 try:
9530                         avg = getloadavg()
9531                 except OSError:
9532                         return 'unknown'
9533
9534                 max_avg = max(avg)
9535
9536                 if max_avg < 10:
9537                         digits = 2
9538                 elif max_avg < 100:
9539                         digits = 1
9540                 else:
9541                         digits = 0
9542
9543                 return ", ".join(("%%.%df" % digits ) % x for x in avg)
9544
9545         def display(self):
9546                 """
9547                 Display status on stdout, but only if something has
9548                 changed since the last call.
9549                 """
9550
9551                 if self.quiet:
9552                         return
9553
9554                 current_time = time.time()
9555                 time_delta = current_time - self._last_display_time
9556                 if self._displayed and \
9557                         not self._changed:
9558                         if not self._isatty:
9559                                 return
9560                         if time_delta < self._min_display_latency:
9561                                 return
9562
9563                 self._last_display_time = current_time
9564                 self._changed = False
9565                 self._display_status()
9566
9567         def _display_status(self):
9568                 # Don't use len(self._completed_tasks) here since that also
9569                 # can include uninstall tasks.
9570                 curval_str = str(self.curval)
9571                 maxval_str = str(self.maxval)
9572                 running_str = str(self.running)
9573                 failed_str = str(self.failed)
9574                 load_avg_str = self._load_avg_str()
9575
9576                 color_output = StringIO.StringIO()
9577                 plain_output = StringIO.StringIO()
9578                 style_file = portage.output.ConsoleStyleFile(color_output)
9579                 style_file.write_listener = plain_output
9580                 style_writer = portage.output.StyleWriter(file=style_file, maxcol=9999)
9581                 style_writer.style_listener = style_file.new_styles
9582                 f = formatter.AbstractFormatter(style_writer)
9583
9584                 number_style = "INFORM"
9585                 f.add_literal_data("Jobs: ")
9586                 f.push_style(number_style)
9587                 f.add_literal_data(curval_str)
9588                 f.pop_style()
9589                 f.add_literal_data(" of ")
9590                 f.push_style(number_style)
9591                 f.add_literal_data(maxval_str)
9592                 f.pop_style()
9593                 f.add_literal_data(" complete")
9594
9595                 if self.running:
9596                         f.add_literal_data(", ")
9597                         f.push_style(number_style)
9598                         f.add_literal_data(running_str)
9599                         f.pop_style()
9600                         f.add_literal_data(" running")
9601
9602                 if self.failed:
9603                         f.add_literal_data(", ")
9604                         f.push_style(number_style)
9605                         f.add_literal_data(failed_str)
9606                         f.pop_style()
9607                         f.add_literal_data(" failed")
9608
9609                 padding = self._jobs_column_width - len(plain_output.getvalue())
9610                 if padding > 0:
9611                         f.add_literal_data(padding * " ")
9612
9613                 f.add_literal_data("Load avg: ")
9614                 f.add_literal_data(load_avg_str)
9615
9616                 # Truncate to fit width, to avoid making the terminal scroll if the
9617                 # line overflows (happens when the load average is large).
9618                 plain_output = plain_output.getvalue()
9619                 if self._isatty and len(plain_output) > self.width:
9620                         # Use plain_output here since it's easier to truncate
9621                         # properly than the color output which contains console
9622                         # color codes.
9623                         self._update(plain_output[:self.width])
9624                 else:
9625                         self._update(color_output.getvalue())
9626
9627                 xtermTitle(" ".join(plain_output.split()))
9628
9629 class Scheduler(PollScheduler):
9630
9631         _opts_ignore_blockers = \
9632                 frozenset(["--buildpkgonly",
9633                 "--fetchonly", "--fetch-all-uri",
9634                 "--nodeps", "--pretend"])
9635
9636         _opts_no_background = \
9637                 frozenset(["--pretend",
9638                 "--fetchonly", "--fetch-all-uri"])
9639
9640         _opts_no_restart = frozenset(["--buildpkgonly",
9641                 "--fetchonly", "--fetch-all-uri", "--pretend"])
9642
9643         _bad_resume_opts = set(["--ask", "--changelog",
9644                 "--resume", "--skipfirst"])
9645
9646         _fetch_log = "/var/log/emerge-fetch.log"
9647
9648         class _iface_class(SlotObject):
9649                 __slots__ = ("dblinkEbuildPhase", "dblinkDisplayMerge",
9650                         "dblinkElog", "fetch", "register", "schedule",
9651                         "scheduleSetup", "scheduleUnpack", "scheduleYield",
9652                         "unregister")
9653
9654         class _fetch_iface_class(SlotObject):
9655                 __slots__ = ("log_file", "schedule")
9656
9657         _task_queues_class = slot_dict_class(
9658                 ("merge", "jobs", "fetch", "unpack"), prefix="")
9659
9660         class _build_opts_class(SlotObject):
9661                 __slots__ = ("buildpkg", "buildpkgonly",
9662                         "fetch_all_uri", "fetchonly", "pretend")
9663
9664         class _binpkg_opts_class(SlotObject):
9665                 __slots__ = ("fetchonly", "getbinpkg", "pretend")
9666
9667         class _pkg_count_class(SlotObject):
9668                 __slots__ = ("curval", "maxval")
9669
9670         class _emerge_log_class(SlotObject):
9671                 __slots__ = ("xterm_titles",)
9672
9673                 def log(self, *pargs, **kwargs):
9674                         if not self.xterm_titles:
9675                                 # Avoid interference with the scheduler's status display.
9676                                 kwargs.pop("short_msg", None)
9677                         emergelog(self.xterm_titles, *pargs, **kwargs)
9678
9679         class _failed_pkg(SlotObject):
9680                 __slots__ = ("build_dir", "build_log", "pkg", "returncode")
9681
9682         class _ConfigPool(object):
9683                 """Interface for a task to temporarily allocate a config
9684                 instance from a pool. This allows a task to be constructed
9685                 long before the config instance actually becomes needed, like
9686                 when prefetchers are constructed for the whole merge list."""
9687                 __slots__ = ("_root", "_allocate", "_deallocate")
9688                 def __init__(self, root, allocate, deallocate):
9689                         self._root = root
9690                         self._allocate = allocate
9691                         self._deallocate = deallocate
9692                 def allocate(self):
9693                         return self._allocate(self._root)
9694                 def deallocate(self, settings):
9695                         self._deallocate(settings)
9696
9697         class _unknown_internal_error(portage.exception.PortageException):
9698                 """
9699                 Used internally to terminate scheduling. The specific reason for
9700                 the failure should have been dumped to stderr.
9701                 """
9702                 def __init__(self, value=""):
9703                         portage.exception.PortageException.__init__(self, value)
9704
9705         def __init__(self, settings, trees, mtimedb, myopts,
9706                 spinner, mergelist, favorites, digraph):
9707                 PollScheduler.__init__(self)
9708                 self.settings = settings
9709                 self.target_root = settings["ROOT"]
9710                 self.trees = trees
9711                 self.myopts = myopts
9712                 self._spinner = spinner
9713                 self._mtimedb = mtimedb
9714                 self._mergelist = mergelist
9715                 self._favorites = favorites
9716                 self._args_set = InternalPackageSet(favorites)
9717                 self._build_opts = self._build_opts_class()
9718                 for k in self._build_opts.__slots__:
9719                         setattr(self._build_opts, k, "--" + k.replace("_", "-") in myopts)
9720                 self._binpkg_opts = self._binpkg_opts_class()
9721                 for k in self._binpkg_opts.__slots__:
9722                         setattr(self._binpkg_opts, k, "--" + k.replace("_", "-") in myopts)
9723
9724                 self.curval = 0
9725                 self._logger = self._emerge_log_class()
9726                 self._task_queues = self._task_queues_class()
9727                 for k in self._task_queues.allowed_keys:
9728                         setattr(self._task_queues, k,
9729                                 SequentialTaskQueue())
9730                 self._status_display = JobStatusDisplay()
9731                 self._max_load = myopts.get("--load-average")
9732                 max_jobs = myopts.get("--jobs")
9733                 if max_jobs is None:
9734                         max_jobs = 1
9735                 self._set_max_jobs(max_jobs)
9736
9737                 # The root where the currently running
9738                 # portage instance is installed.
9739                 self._running_root = trees["/"]["root_config"]
9740                 self.edebug = 0
9741                 if settings.get("PORTAGE_DEBUG", "") == "1":
9742                         self.edebug = 1
9743                 self.pkgsettings = {}
9744                 self._config_pool = {}
9745                 self._blocker_db = {}
9746                 for root in trees:
9747                         self._config_pool[root] = []
9748                         self._blocker_db[root] = BlockerDB(trees[root]["root_config"])
9749
9750                 fetch_iface = self._fetch_iface_class(log_file=self._fetch_log,
9751                         schedule=self._schedule_fetch)
9752                 self._sched_iface = self._iface_class(
9753                         dblinkEbuildPhase=self._dblink_ebuild_phase,
9754                         dblinkDisplayMerge=self._dblink_display_merge,
9755                         dblinkElog=self._dblink_elog,
9756                         fetch=fetch_iface, register=self._register,
9757                         schedule=self._schedule_wait,
9758                         scheduleSetup=self._schedule_setup,
9759                         scheduleUnpack=self._schedule_unpack,
9760                         scheduleYield=self._schedule_yield,
9761                         unregister=self._unregister)
9762
9763                 self._prefetchers = weakref.WeakValueDictionary()
9764                 self._pkg_queue = []
9765                 self._completed_tasks = set()
9766
9767                 self._failed_pkgs = []
9768                 self._failed_pkgs_all = []
9769                 self._failed_pkgs_die_msgs = []
9770                 self._post_mod_echo_msgs = []
9771                 self._parallel_fetch = False
9772                 merge_count = len([x for x in mergelist \
9773                         if isinstance(x, Package) and x.operation == "merge"])
9774                 self._pkg_count = self._pkg_count_class(
9775                         curval=0, maxval=merge_count)
9776                 self._status_display.maxval = self._pkg_count.maxval
9777
9778                 # The load average takes some time to respond when new
9779                 # jobs are added, so we need to limit the rate of adding
9780                 # new jobs.
9781                 self._job_delay_max = 10
9782                 self._job_delay_factor = 1.0
9783                 self._job_delay_exp = 1.5
9784                 self._previous_job_start_time = None
9785
9786                 self._set_digraph(digraph)
9787
9788                 # This is used to memoize the _choose_pkg() result when
9789                 # no packages can be chosen until one of the existing
9790                 # jobs completes.
9791                 self._choose_pkg_return_early = False
9792
9793                 features = self.settings.features
9794                 if "parallel-fetch" in features and \
9795                         not ("--pretend" in self.myopts or \
9796                         "--fetch-all-uri" in self.myopts or \
9797                         "--fetchonly" in self.myopts):
9798                         if "distlocks" not in features:
9799                                 portage.writemsg(red("!!!")+"\n", noiselevel=-1)
9800                                 portage.writemsg(red("!!!")+" parallel-fetching " + \
9801                                         "requires the distlocks feature enabled"+"\n",
9802                                         noiselevel=-1)
9803                                 portage.writemsg(red("!!!")+" you have it disabled, " + \
9804                                         "thus parallel-fetching is being disabled"+"\n",
9805                                         noiselevel=-1)
9806                                 portage.writemsg(red("!!!")+"\n", noiselevel=-1)
9807                         elif len(mergelist) > 1:
9808                                 self._parallel_fetch = True
9809
9810                 if self._parallel_fetch:
9811                                 # clear out existing fetch log if it exists
9812                                 try:
9813                                         open(self._fetch_log, 'w')
9814                                 except EnvironmentError:
9815                                         pass
9816
9817                 self._running_portage = None
9818                 portage_match = self._running_root.trees["vartree"].dbapi.match(
9819                         portage.const.PORTAGE_PACKAGE_ATOM)
9820                 if portage_match:
9821                         cpv = portage_match.pop()
9822                         self._running_portage = self._pkg(cpv, "installed",
9823                                 self._running_root, installed=True)
9824
9825         def _poll(self, timeout=None):
9826                 self._schedule()
9827                 PollScheduler._poll(self, timeout=timeout)
9828
9829         def _set_max_jobs(self, max_jobs):
9830                 self._max_jobs = max_jobs
9831                 self._task_queues.jobs.max_jobs = max_jobs
9832
9833         def _background_mode(self):
9834                 """
9835                 Check if background mode is enabled and adjust states as necessary.
9836
9837                 @rtype: bool
9838                 @returns: True if background mode is enabled, False otherwise.
9839                 """
9840                 background = (self._max_jobs is True or \
9841                         self._max_jobs > 1 or "--quiet" in self.myopts) and \
9842                         not bool(self._opts_no_background.intersection(self.myopts))
9843
9844                 if background:
9845                         interactive_tasks = self._get_interactive_tasks()
9846                         if interactive_tasks:
9847                                 background = False
9848                                 writemsg_level(">>> Sending package output to stdio due " + \
9849                                         "to interactive package(s):\n",
9850                                         level=logging.INFO, noiselevel=-1)
9851                                 msg = [""]
9852                                 for pkg in interactive_tasks:
9853                                         pkg_str = "  " + colorize("INFORM", str(pkg.cpv))
9854                                         if pkg.root != "/":
9855                                                 pkg_str += " for " + pkg.root
9856                                         msg.append(pkg_str)
9857                                 msg.append("")
9858                                 writemsg_level("".join("%s\n" % (l,) for l in msg),
9859                                         level=logging.INFO, noiselevel=-1)
9860                                 if self._max_jobs is True or self._max_jobs > 1:
9861                                         self._set_max_jobs(1)
9862                                         writemsg_level(">>> Setting --jobs=1 due " + \
9863                                                 "to the above interactive package(s)\n",
9864                                                 level=logging.INFO, noiselevel=-1)
9865
9866                 self._status_display.quiet = \
9867                         not background or \
9868                         ("--quiet" in self.myopts and \
9869                         "--verbose" not in self.myopts)
9870
9871                 self._logger.xterm_titles = \
9872                         "notitles" not in self.settings.features and \
9873                         self._status_display.quiet
9874
9875                 return background
9876
9877         def _get_interactive_tasks(self):
9878                 from portage import flatten
9879                 from portage.dep import use_reduce, paren_reduce
9880                 interactive_tasks = []
9881                 for task in self._mergelist:
9882                         if not (isinstance(task, Package) and \
9883                                 task.operation == "merge"):
9884                                 continue
9885                         try:
9886                                 properties = flatten(use_reduce(paren_reduce(
9887                                         task.metadata["PROPERTIES"]), uselist=task.use.enabled))
9888                         except portage.exception.InvalidDependString, e:
9889                                 show_invalid_depstring_notice(task,
9890                                         task.metadata["PROPERTIES"], str(e))
9891                                 raise self._unknown_internal_error()
9892                         if "interactive" in properties:
9893                                 interactive_tasks.append(task)
9894                 return interactive_tasks
9895
9896         def _set_digraph(self, digraph):
9897                 if "--nodeps" in self.myopts or \
9898                         (self._max_jobs is not True and self._max_jobs < 2):
9899                         # save some memory
9900                         self._digraph = None
9901                         return
9902
9903                 self._digraph = digraph
9904                 self._prune_digraph()
9905
9906         def _prune_digraph(self):
9907                 """
9908                 Prune any root nodes that are irrelevant.
9909                 """
9910
9911                 graph = self._digraph
9912                 completed_tasks = self._completed_tasks
9913                 removed_nodes = set()
9914                 while True:
9915                         for node in graph.root_nodes():
9916                                 if not isinstance(node, Package) or \
9917                                         (node.installed and node.operation == "nomerge") or \
9918                                         node.onlydeps or \
9919                                         node in completed_tasks:
9920                                         removed_nodes.add(node)
9921                         if removed_nodes:
9922                                 graph.difference_update(removed_nodes)
9923                         if not removed_nodes:
9924                                 break
9925                         removed_nodes.clear()
9926
9927         class _pkg_failure(portage.exception.PortageException):
9928                 """
9929                 An instance of this class is raised by unmerge() when
9930                 an uninstallation fails.
9931                 """
9932                 status = 1
9933                 def __init__(self, *pargs):
9934                         portage.exception.PortageException.__init__(self, pargs)
9935                         if pargs:
9936                                 self.status = pargs[0]
9937
9938         def _schedule_fetch(self, fetcher):
9939                 """
9940                 Schedule a fetcher on the fetch queue, in order to
9941                 serialize access to the fetch log.
9942                 """
9943                 self._task_queues.fetch.addFront(fetcher)
9944
9945         def _schedule_setup(self, setup_phase):
9946                 """
9947                 Schedule a setup phase on the merge queue, in order to
9948                 serialize unsandboxed access to the live filesystem.
9949                 """
9950                 self._task_queues.merge.addFront(setup_phase)
9951                 self._schedule()
9952
9953         def _schedule_unpack(self, unpack_phase):
9954                 """
9955                 Schedule an unpack phase on the unpack queue, in order
9956                 to serialize $DISTDIR access for live ebuilds.
9957                 """
9958                 self._task_queues.unpack.add(unpack_phase)
9959
9960         def _find_blockers(self, new_pkg):
9961                 """
9962                 Returns a callable which should be called only when
9963                 the vdb lock has been acquired.
9964                 """
9965                 def get_blockers():
9966                         return self._find_blockers_with_lock(new_pkg, acquire_lock=0)
9967                 return get_blockers
9968
9969         def _find_blockers_with_lock(self, new_pkg, acquire_lock=0):
9970                 if self._opts_ignore_blockers.intersection(self.myopts):
9971                         return None
9972
9973                 # Call gc.collect() here to avoid heap overflow that
9974                 # triggers 'Cannot allocate memory' errors (reported
9975                 # with python-2.5).
9976                 import gc
9977                 gc.collect()
9978
9979                 blocker_db = self._blocker_db[new_pkg.root]
9980
9981                 blocker_dblinks = []
9982                 for blocking_pkg in blocker_db.findInstalledBlockers(
9983                         new_pkg, acquire_lock=acquire_lock):
9984                         if new_pkg.slot_atom == blocking_pkg.slot_atom:
9985                                 continue
9986                         if new_pkg.cpv == blocking_pkg.cpv:
9987                                 continue
9988                         blocker_dblinks.append(portage.dblink(
9989                                 blocking_pkg.category, blocking_pkg.pf, blocking_pkg.root,
9990                                 self.pkgsettings[blocking_pkg.root], treetype="vartree",
9991                                 vartree=self.trees[blocking_pkg.root]["vartree"]))
9992
9993                 gc.collect()
9994
9995                 return blocker_dblinks
9996
9997         def _dblink_pkg(self, pkg_dblink):
9998                 cpv = pkg_dblink.mycpv
9999                 type_name = RootConfig.tree_pkg_map[pkg_dblink.treetype]
10000                 root_config = self.trees[pkg_dblink.myroot]["root_config"]
10001                 installed = type_name == "installed"
10002                 return self._pkg(cpv, type_name, root_config, installed=installed)
10003
10004         def _append_to_log_path(self, log_path, msg):
10005                 f = open(log_path, 'a')
10006                 try:
10007                         f.write(msg)
10008                 finally:
10009                         f.close()
10010
10011         def _dblink_elog(self, pkg_dblink, phase, func, msgs):
10012
10013                 log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
10014                 log_file = None
10015                 out = sys.stdout
10016                 background = self._background
10017
10018                 if background and log_path is not None:
10019                         log_file = open(log_path, 'a')
10020                         out = log_file
10021
10022                 try:
10023                         for msg in msgs:
10024                                 func(msg, phase=phase, key=pkg_dblink.mycpv, out=out)
10025                 finally:
10026                         if log_file is not None:
10027                                 log_file.close()
10028
10029         def _dblink_display_merge(self, pkg_dblink, msg, level=0, noiselevel=0):
10030                 log_path = pkg_dblink.settings.get("PORTAGE_LOG_FILE")
10031                 background = self._background
10032
10033                 if log_path is None:
10034                         if not (background and level < logging.WARN):
10035                                 portage.util.writemsg_level(msg,
10036                                         level=level, noiselevel=noiselevel)
10037                 else:
10038                         if not background:
10039                                 portage.util.writemsg_level(msg,
10040                                         level=level, noiselevel=noiselevel)
10041                         self._append_to_log_path(log_path, msg)
10042
10043         def _dblink_ebuild_phase(self,
10044                 pkg_dblink, pkg_dbapi, ebuild_path, phase):
10045                 """
10046                 Using this callback for merge phases allows the scheduler
10047                 to run while these phases execute asynchronously, and allows
10048                 the scheduler control output handling.
10049                 """
10050
10051                 scheduler = self._sched_iface
10052                 settings = pkg_dblink.settings
10053                 pkg = self._dblink_pkg(pkg_dblink)
10054                 background = self._background
10055                 log_path = settings.get("PORTAGE_LOG_FILE")
10056
10057                 ebuild_phase = EbuildPhase(background=background,
10058                         pkg=pkg, phase=phase, scheduler=scheduler,
10059                         settings=settings, tree=pkg_dblink.treetype)
10060                 ebuild_phase.start()
10061                 ebuild_phase.wait()
10062
10063                 return ebuild_phase.returncode
10064
10065         def _check_manifests(self):
10066                 # Verify all the manifests now so that the user is notified of failure
10067                 # as soon as possible.
10068                 if "strict" not in self.settings.features or \
10069                         "--fetchonly" in self.myopts or \
10070                         "--fetch-all-uri" in self.myopts:
10071                         return os.EX_OK
10072
10073                 shown_verifying_msg = False
10074                 quiet_settings = {}
10075                 for myroot, pkgsettings in self.pkgsettings.iteritems():
10076                         quiet_config = portage.config(clone=pkgsettings)
10077                         quiet_config["PORTAGE_QUIET"] = "1"
10078                         quiet_config.backup_changes("PORTAGE_QUIET")
10079                         quiet_settings[myroot] = quiet_config
10080                         del quiet_config
10081
10082                 for x in self._mergelist:
10083                         if not isinstance(x, Package) or \
10084                                 x.type_name != "ebuild":
10085                                 continue
10086
10087                         if not shown_verifying_msg:
10088                                 shown_verifying_msg = True
10089                                 self._status_msg("Verifying ebuild manifests")
10090
10091                         root_config = x.root_config
10092                         portdb = root_config.trees["porttree"].dbapi
10093                         quiet_config = quiet_settings[root_config.root]
10094                         quiet_config["O"] = os.path.dirname(portdb.findname(x.cpv))
10095                         if not portage.digestcheck([], quiet_config, strict=True):
10096                                 return 1
10097
10098                 return os.EX_OK
10099
10100         def _add_prefetchers(self):
10101
10102                 if not self._parallel_fetch:
10103                         return
10104
10105                 if self._parallel_fetch:
10106                         self._status_msg("Starting parallel fetch")
10107
10108                         prefetchers = self._prefetchers
10109                         getbinpkg = "--getbinpkg" in self.myopts
10110
10111                         # In order to avoid "waiting for lock" messages
10112                         # at the beginning, which annoy users, never
10113                         # spawn a prefetcher for the first package.
10114                         for pkg in self._mergelist[1:]:
10115                                 prefetcher = self._create_prefetcher(pkg)
10116                                 if prefetcher is not None:
10117                                         self._task_queues.fetch.add(prefetcher)
10118                                         prefetchers[pkg] = prefetcher
10119
10120         def _create_prefetcher(self, pkg):
10121                 """
10122                 @return: a prefetcher, or None if not applicable
10123                 """
10124                 prefetcher = None
10125
10126                 if not isinstance(pkg, Package):
10127                         pass
10128
10129                 elif pkg.type_name == "ebuild":
10130
10131                         prefetcher = EbuildFetcher(background=True,
10132                                 config_pool=self._ConfigPool(pkg.root,
10133                                 self._allocate_config, self._deallocate_config),
10134                                 fetchonly=1, logfile=self._fetch_log,
10135                                 pkg=pkg, prefetch=True, scheduler=self._sched_iface)
10136
10137                 elif pkg.type_name == "binary" and \
10138                         "--getbinpkg" in self.myopts and \
10139                         pkg.root_config.trees["bintree"].isremote(pkg.cpv):
10140
10141                         prefetcher = BinpkgPrefetcher(background=True,
10142                                 pkg=pkg, scheduler=self._sched_iface)
10143
10144                 return prefetcher
10145
10146         def _is_restart_scheduled(self):
10147                 """
10148                 Check if the merge list contains a replacement
10149                 for the current running instance, that will result
10150                 in restart after merge.
10151                 @rtype: bool
10152                 @returns: True if a restart is scheduled, False otherwise.
10153                 """
10154                 if self._opts_no_restart.intersection(self.myopts):
10155                         return False
10156
10157                 mergelist = self._mergelist
10158
10159                 for i, pkg in enumerate(mergelist):
10160                         if self._is_restart_necessary(pkg) and \
10161                                 i != len(mergelist) - 1:
10162                                 return True
10163
10164                 return False
10165
10166         def _is_restart_necessary(self, pkg):
10167                 """
10168                 @return: True if merging the given package
10169                         requires restart, False otherwise.
10170                 """
10171
10172                 # Figure out if we need a restart.
10173                 if pkg.root == self._running_root.root and \
10174                         portage.match_from_list(
10175                         portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
10176                         if self._running_portage:
10177                                 return pkg.cpv != self._running_portage.cpv
10178                         return True
10179                 return False
10180
10181         def _restart_if_necessary(self, pkg):
10182                 """
10183                 Use execv() to restart emerge. This happens
10184                 if portage upgrades itself and there are
10185                 remaining packages in the list.
10186                 """
10187
10188                 if self._opts_no_restart.intersection(self.myopts):
10189                         return
10190
10191                 if not self._is_restart_necessary(pkg):
10192                         return
10193
10194                 if pkg == self._mergelist[-1]:
10195                         return
10196
10197                 self._main_loop_cleanup()
10198
10199                 logger = self._logger
10200                 pkg_count = self._pkg_count
10201                 mtimedb = self._mtimedb
10202                 bad_resume_opts = self._bad_resume_opts
10203
10204                 logger.log(" ::: completed emerge (%s of %s) %s to %s" % \
10205                         (pkg_count.curval, pkg_count.maxval, pkg.cpv, pkg.root))
10206
10207                 logger.log(" *** RESTARTING " + \
10208                         "emerge via exec() after change of " + \
10209                         "portage version.")
10210
10211                 mtimedb["resume"]["mergelist"].remove(list(pkg))
10212                 mtimedb.commit()
10213                 portage.run_exitfuncs()
10214                 mynewargv = [sys.argv[0], "--resume"]
10215                 resume_opts = self.myopts.copy()
10216                 # For automatic resume, we need to prevent
10217                 # any of bad_resume_opts from leaking in
10218                 # via EMERGE_DEFAULT_OPTS.
10219                 resume_opts["--ignore-default-opts"] = True
10220                 for myopt, myarg in resume_opts.iteritems():
10221                         if myopt not in bad_resume_opts:
10222                                 if myarg is True:
10223                                         mynewargv.append(myopt)
10224                                 else:
10225                                         mynewargv.append(myopt +"="+ str(myarg))
10226                 # priority only needs to be adjusted on the first run
10227                 os.environ["PORTAGE_NICENESS"] = "0"
10228                 os.execv(mynewargv[0], mynewargv)
10229
10230         def merge(self):
10231
10232                 if "--resume" in self.myopts:
10233                         # We're resuming.
10234                         portage.writemsg_stdout(
10235                                 colorize("GOOD", "*** Resuming merge...\n"), noiselevel=-1)
10236                         self._logger.log(" *** Resuming merge...")
10237
10238                 self._save_resume_list()
10239
10240                 try:
10241                         self._background = self._background_mode()
10242                 except self._unknown_internal_error:
10243                         return 1
10244
10245                 for root in self.trees:
10246                         root_config = self.trees[root]["root_config"]
10247
10248                         # Even for --pretend --fetch mode, PORTAGE_TMPDIR is required
10249                         # since it might spawn pkg_nofetch which requires PORTAGE_BUILDDIR
10250                         # for ensuring sane $PWD (bug #239560) and storing elog messages.
10251                         tmpdir = root_config.settings.get("PORTAGE_TMPDIR", "")
10252                         if not tmpdir or not os.path.isdir(tmpdir):
10253                                 msg = "The directory specified in your " + \
10254                                         "PORTAGE_TMPDIR variable, '%s', " % tmpdir + \
10255                                 "does not exist. Please create this " + \
10256                                 "directory or correct your PORTAGE_TMPDIR setting."
10257                                 msg = textwrap.wrap(msg, 70)
10258                                 out = portage.output.EOutput()
10259                                 for l in msg:
10260                                         out.eerror(l)
10261                                 return 1
10262
10263                         if self._background:
10264                                 root_config.settings.unlock()
10265                                 root_config.settings["PORTAGE_BACKGROUND"] = "1"
10266                                 root_config.settings.backup_changes("PORTAGE_BACKGROUND")
10267                                 root_config.settings.lock()
10268
10269                         self.pkgsettings[root] = portage.config(
10270                                 clone=root_config.settings)
10271
10272                 rval = self._check_manifests()
10273                 if rval != os.EX_OK:
10274                         return rval
10275
10276                 keep_going = "--keep-going" in self.myopts
10277                 fetchonly = self._build_opts.fetchonly
10278                 mtimedb = self._mtimedb
10279                 failed_pkgs = self._failed_pkgs
10280
10281                 while True:
10282                         rval = self._merge()
10283                         if rval == os.EX_OK or fetchonly or not keep_going:
10284                                 break
10285                         if "resume" not in mtimedb:
10286                                 break
10287                         mergelist = self._mtimedb["resume"].get("mergelist")
10288                         if not mergelist:
10289                                 break
10290
10291                         if not failed_pkgs:
10292                                 break
10293
10294                         for failed_pkg in failed_pkgs:
10295                                 mergelist.remove(list(failed_pkg.pkg))
10296
10297                         self._failed_pkgs_all.extend(failed_pkgs)
10298                         del failed_pkgs[:]
10299
10300                         if not mergelist:
10301                                 break
10302
10303                         if not self._calc_resume_list():
10304                                 break
10305
10306                         clear_caches(self.trees)
10307                         if not self._mergelist:
10308                                 break
10309
10310                         self._save_resume_list()
10311                         self._pkg_count.curval = 0
10312                         self._pkg_count.maxval = len([x for x in self._mergelist \
10313                                 if isinstance(x, Package) and x.operation == "merge"])
10314                         self._status_display.maxval = self._pkg_count.maxval
10315
10316                 self._logger.log(" *** Finished. Cleaning up...")
10317
10318                 if failed_pkgs:
10319                         self._failed_pkgs_all.extend(failed_pkgs)
10320                         del failed_pkgs[:]
10321
10322                 background = self._background
10323                 failure_log_shown = False
10324                 if background and len(self._failed_pkgs_all) == 1:
10325                         # If only one package failed then just show it's
10326                         # whole log for easy viewing.
10327                         failed_pkg = self._failed_pkgs_all[-1]
10328                         build_dir = failed_pkg.build_dir
10329                         log_file = None
10330
10331                         log_paths = [failed_pkg.build_log]
10332
10333                         log_path = self._locate_failure_log(failed_pkg)
10334                         if log_path is not None:
10335                                 try:
10336                                         log_file = open(log_path, 'rb')
10337                                 except IOError:
10338                                         pass
10339
10340                         if log_file is not None:
10341                                 try:
10342                                         for line in log_file:
10343                                                 writemsg_level(line, noiselevel=-1)
10344                                 finally:
10345                                         log_file.close()
10346                                 failure_log_shown = True
10347
10348                 # Dump mod_echo output now since it tends to flood the terminal.
10349                 # This allows us to avoid having more important output, generated
10350                 # later, from being swept away by the mod_echo output.
10351                 mod_echo_output =  _flush_elog_mod_echo()
10352
10353                 if background and not failure_log_shown and \
10354                         self._failed_pkgs_all and \
10355                         self._failed_pkgs_die_msgs and \
10356                         not mod_echo_output:
10357
10358                         printer = portage.output.EOutput()
10359                         for mysettings, key, logentries in self._failed_pkgs_die_msgs:
10360                                 root_msg = ""
10361                                 if mysettings["ROOT"] != "/":
10362                                         root_msg = " merged to %s" % mysettings["ROOT"]
10363                                 print
10364                                 printer.einfo("Error messages for package %s%s:" % \
10365                                         (colorize("INFORM", key), root_msg))
10366                                 print
10367                                 for phase in portage.const.EBUILD_PHASES:
10368                                         if phase not in logentries:
10369                                                 continue
10370                                         for msgtype, msgcontent in logentries[phase]:
10371                                                 if isinstance(msgcontent, basestring):
10372                                                         msgcontent = [msgcontent]
10373                                                 for line in msgcontent:
10374                                                         printer.eerror(line.strip("\n"))
10375
10376                 if self._post_mod_echo_msgs:
10377                         for msg in self._post_mod_echo_msgs:
10378                                 msg()
10379
10380                 if len(self._failed_pkgs_all) > 1:
10381                         msg = "The following packages have " + \
10382                                 "failed to build or install:"
10383                         prefix = bad(" * ")
10384                         writemsg(prefix + "\n", noiselevel=-1)
10385                         from textwrap import wrap
10386                         for line in wrap(msg, 72):
10387                                 writemsg("%s%s\n" % (prefix, line), noiselevel=-1)
10388                         writemsg(prefix + "\n", noiselevel=-1)
10389                         for failed_pkg in self._failed_pkgs_all:
10390                                 writemsg("%s\t%s\n" % (prefix,
10391                                         colorize("INFORM", str(failed_pkg.pkg))),
10392                                         noiselevel=-1)
10393                         writemsg(prefix + "\n", noiselevel=-1)
10394
10395                 return rval
10396
10397         def _elog_listener(self, mysettings, key, logentries, fulltext):
10398                 errors = portage.elog.filter_loglevels(logentries, ["ERROR"])
10399                 if errors:
10400                         self._failed_pkgs_die_msgs.append(
10401                                 (mysettings, key, errors))
10402
10403         def _locate_failure_log(self, failed_pkg):
10404
10405                 build_dir = failed_pkg.build_dir
10406                 log_file = None
10407
10408                 log_paths = [failed_pkg.build_log]
10409
10410                 for log_path in log_paths:
10411                         if not log_path:
10412                                 continue
10413
10414                         try:
10415                                 log_size = os.stat(log_path).st_size
10416                         except OSError:
10417                                 continue
10418
10419                         if log_size == 0:
10420                                 continue
10421
10422                         return log_path
10423
10424                 return None
10425
10426         def _add_packages(self):
10427                 pkg_queue = self._pkg_queue
10428                 for pkg in self._mergelist:
10429                         if isinstance(pkg, Package):
10430                                 pkg_queue.append(pkg)
10431                         elif isinstance(pkg, Blocker):
10432                                 pass
10433
10434         def _merge_exit(self, merge):
10435                 self._do_merge_exit(merge)
10436                 self._deallocate_config(merge.merge.settings)
10437                 if merge.returncode == os.EX_OK and \
10438                         not merge.merge.pkg.installed:
10439                         self._status_display.curval += 1
10440                 self._status_display.merges = len(self._task_queues.merge)
10441                 self._schedule()
10442
10443         def _do_merge_exit(self, merge):
10444                 pkg = merge.merge.pkg
10445                 if merge.returncode != os.EX_OK:
10446                         settings = merge.merge.settings
10447                         build_dir = settings.get("PORTAGE_BUILDDIR")
10448                         build_log = settings.get("PORTAGE_LOG_FILE")
10449
10450                         self._failed_pkgs.append(self._failed_pkg(
10451                                 build_dir=build_dir, build_log=build_log,
10452                                 pkg=pkg,
10453                                 returncode=merge.returncode))
10454                         self._failed_pkg_msg(self._failed_pkgs[-1], "install", "to")
10455
10456                         self._status_display.failed = len(self._failed_pkgs)
10457                         return
10458
10459                 self._task_complete(pkg)
10460                 pkg_to_replace = merge.merge.pkg_to_replace
10461                 if pkg_to_replace is not None:
10462                         # When a package is replaced, mark it's uninstall
10463                         # task complete (if any).
10464                         uninst_hash_key = \
10465                                 ("installed", pkg.root, pkg_to_replace.cpv, "uninstall")
10466                         self._task_complete(uninst_hash_key)
10467
10468                 if pkg.installed:
10469                         return
10470
10471                 self._restart_if_necessary(pkg)
10472
10473                 # Call mtimedb.commit() after each merge so that
10474                 # --resume still works after being interrupted
10475                 # by reboot, sigkill or similar.
10476                 mtimedb = self._mtimedb
10477                 mtimedb["resume"]["mergelist"].remove(list(pkg))
10478                 if not mtimedb["resume"]["mergelist"]:
10479                         del mtimedb["resume"]
10480                 mtimedb.commit()
10481
10482         def _build_exit(self, build):
10483                 if build.returncode == os.EX_OK:
10484                         self.curval += 1
10485                         merge = PackageMerge(merge=build)
10486                         merge.addExitListener(self._merge_exit)
10487                         self._task_queues.merge.add(merge)
10488                         self._status_display.merges = len(self._task_queues.merge)
10489                 else:
10490                         settings = build.settings
10491                         build_dir = settings.get("PORTAGE_BUILDDIR")
10492                         build_log = settings.get("PORTAGE_LOG_FILE")
10493
10494                         self._failed_pkgs.append(self._failed_pkg(
10495                                 build_dir=build_dir, build_log=build_log,
10496                                 pkg=build.pkg,
10497                                 returncode=build.returncode))
10498                         self._failed_pkg_msg(self._failed_pkgs[-1], "emerge", "for")
10499
10500                         self._status_display.failed = len(self._failed_pkgs)
10501                         self._deallocate_config(build.settings)
10502                 self._jobs -= 1
10503                 self._status_display.running = self._jobs
10504                 self._schedule()
10505
10506         def _extract_exit(self, build):
10507                 self._build_exit(build)
10508
10509         def _task_complete(self, pkg):
10510                 self._completed_tasks.add(pkg)
10511                 self._choose_pkg_return_early = False
10512
10513         def _merge(self):
10514
10515                 self._add_prefetchers()
10516                 self._add_packages()
10517                 pkg_queue = self._pkg_queue
10518                 failed_pkgs = self._failed_pkgs
10519                 portage.locks._quiet = self._background
10520                 portage.elog._emerge_elog_listener = self._elog_listener
10521                 rval = os.EX_OK
10522
10523                 try:
10524                         self._main_loop()
10525                 finally:
10526                         self._main_loop_cleanup()
10527                         portage.locks._quiet = False
10528                         portage.elog._emerge_elog_listener = None
10529                         if failed_pkgs:
10530                                 rval = failed_pkgs[-1].returncode
10531
10532                 return rval
10533
10534         def _main_loop_cleanup(self):
10535                 del self._pkg_queue[:]
10536                 self._completed_tasks.clear()
10537                 self._choose_pkg_return_early = False
10538                 self._status_display.reset()
10539                 self._digraph = None
10540                 self._task_queues.fetch.clear()
10541
10542         def _choose_pkg(self):
10543                 """
10544                 Choose a task that has all it's dependencies satisfied.
10545                 """
10546
10547                 if self._choose_pkg_return_early:
10548                         return None
10549
10550                 if self._digraph is None:
10551                         if (self._jobs or self._task_queues.merge) and \
10552                                 not ("--nodeps" in self.myopts and \
10553                                 (self._max_jobs is True or self._max_jobs > 1)):
10554                                 self._choose_pkg_return_early = True
10555                                 return None
10556                         return self._pkg_queue.pop(0)
10557
10558                 if not (self._jobs or self._task_queues.merge):
10559                         return self._pkg_queue.pop(0)
10560
10561                 self._prune_digraph()
10562
10563                 chosen_pkg = None
10564                 later = set(self._pkg_queue)
10565                 for pkg in self._pkg_queue:
10566                         later.remove(pkg)
10567                         if not self._dependent_on_scheduled_merges(pkg, later):
10568                                 chosen_pkg = pkg
10569                                 break
10570
10571                 if chosen_pkg is not None:
10572                         self._pkg_queue.remove(chosen_pkg)
10573
10574                 if chosen_pkg is None:
10575                         # There's no point in searching for a package to
10576                         # choose until at least one of the existing jobs
10577                         # completes.
10578                         self._choose_pkg_return_early = True
10579
10580                 return chosen_pkg
10581
10582         def _dependent_on_scheduled_merges(self, pkg, later):
10583                 """
10584                 Traverse the subgraph of the given packages deep dependencies
10585                 to see if it contains any scheduled merges.
10586                 @param pkg: a package to check dependencies for
10587                 @type pkg: Package
10588                 @param later: packages for which dependence should be ignored
10589                         since they will be merged later than pkg anyway and therefore
10590                         delaying the merge of pkg will not result in a more optimal
10591                         merge order
10592                 @type later: set
10593                 @rtype: bool
10594                 @returns: True if the package is dependent, False otherwise.
10595                 """
10596
10597                 graph = self._digraph
10598                 completed_tasks = self._completed_tasks
10599
10600                 dependent = False
10601                 traversed_nodes = set([pkg])
10602                 direct_deps = graph.child_nodes(pkg)
10603                 node_stack = direct_deps
10604                 direct_deps = frozenset(direct_deps)
10605                 while node_stack:
10606                         node = node_stack.pop()
10607                         if node in traversed_nodes:
10608                                 continue
10609                         traversed_nodes.add(node)
10610                         if not ((node.installed and node.operation == "nomerge") or \
10611                                 (node.operation == "uninstall" and \
10612                                 node not in direct_deps) or \
10613                                 node in completed_tasks or \
10614                                 node in later):
10615                                 dependent = True
10616                                 break
10617                         node_stack.extend(graph.child_nodes(node))
10618
10619                 return dependent
10620
10621         def _allocate_config(self, root):
10622                 """
10623                 Allocate a unique config instance for a task in order
10624                 to prevent interference between parallel tasks.
10625                 """
10626                 if self._config_pool[root]:
10627                         temp_settings = self._config_pool[root].pop()
10628                 else:
10629                         temp_settings = portage.config(clone=self.pkgsettings[root])
10630                 # Since config.setcpv() isn't guaranteed to call config.reset() due to
10631                 # performance reasons, call it here to make sure all settings from the
10632                 # previous package get flushed out (such as PORTAGE_LOG_FILE).
10633                 temp_settings.reload()
10634                 temp_settings.reset()
10635                 return temp_settings
10636
10637         def _deallocate_config(self, settings):
10638                 self._config_pool[settings["ROOT"]].append(settings)
10639
10640         def _main_loop(self):
10641
10642                 # Only allow 1 job max if a restart is scheduled
10643                 # due to portage update.
10644                 if self._is_restart_scheduled() or \
10645                         self._opts_no_background.intersection(self.myopts):
10646                         self._set_max_jobs(1)
10647
10648                 merge_queue = self._task_queues.merge
10649
10650                 while self._schedule():
10651                         if self._poll_event_handlers:
10652                                 self._poll_loop()
10653
10654                 while True:
10655                         self._schedule()
10656                         if not (self._jobs or merge_queue):
10657                                 break
10658                         if self._poll_event_handlers:
10659                                 self._poll_loop()
10660
10661         def _keep_scheduling(self):
10662                 return bool(self._pkg_queue and \
10663                         not (self._failed_pkgs and not self._build_opts.fetchonly))
10664
10665         def _schedule_tasks(self):
10666                 self._schedule_tasks_imp()
10667                 self._status_display.display()
10668
10669                 state_change = 0
10670                 for q in self._task_queues.values():
10671                         if q.schedule():
10672                                 state_change += 1
10673
10674                 # Cancel prefetchers if they're the only reason
10675                 # the main poll loop is still running.
10676                 if self._failed_pkgs and not self._build_opts.fetchonly and \
10677                         not (self._jobs or self._task_queues.merge) and \
10678                         self._task_queues.fetch:
10679                         self._task_queues.fetch.clear()
10680                         state_change += 1
10681
10682                 if state_change:
10683                         self._schedule_tasks_imp()
10684                         self._status_display.display()
10685
10686                 return self._keep_scheduling()
10687
10688         def _job_delay(self):
10689                 """
10690                 @rtype: bool
10691                 @returns: True if job scheduling should be delayed, False otherwise.
10692                 """
10693
10694                 if self._jobs and self._max_load is not None:
10695
10696                         current_time = time.time()
10697
10698                         delay = self._job_delay_factor * self._jobs ** self._job_delay_exp
10699                         if delay > self._job_delay_max:
10700                                 delay = self._job_delay_max
10701                         if (current_time - self._previous_job_start_time) < delay:
10702                                 return True
10703
10704                 return False
10705
10706         def _schedule_tasks_imp(self):
10707                 """
10708                 @rtype: bool
10709                 @returns: True if state changed, False otherwise.
10710                 """
10711
10712                 state_change = 0
10713
10714                 while True:
10715
10716                         if not self._keep_scheduling():
10717                                 return bool(state_change)
10718
10719                         if self._choose_pkg_return_early or \
10720                                 not self._can_add_job() or \
10721                                 self._job_delay():
10722                                 return bool(state_change)
10723
10724                         pkg = self._choose_pkg()
10725                         if pkg is None:
10726                                 return bool(state_change)
10727
10728                         state_change += 1
10729
10730                         if not pkg.installed:
10731                                 self._pkg_count.curval += 1
10732
10733                         task = self._task(pkg)
10734
10735                         if pkg.installed:
10736                                 merge = PackageMerge(merge=task)
10737                                 merge.addExitListener(self._merge_exit)
10738                                 self._task_queues.merge.add(merge)
10739
10740                         elif pkg.built:
10741                                 self._jobs += 1
10742                                 self._previous_job_start_time = time.time()
10743                                 self._status_display.running = self._jobs
10744                                 task.addExitListener(self._extract_exit)
10745                                 self._task_queues.jobs.add(task)
10746
10747                         else:
10748                                 self._jobs += 1
10749                                 self._previous_job_start_time = time.time()
10750                                 self._status_display.running = self._jobs
10751                                 task.addExitListener(self._build_exit)
10752                                 self._task_queues.jobs.add(task)
10753
10754                 return bool(state_change)
10755
10756         def _task(self, pkg):
10757
10758                 pkg_to_replace = None
10759                 if pkg.operation != "uninstall":
10760                         vardb = pkg.root_config.trees["vartree"].dbapi
10761                         previous_cpv = vardb.match(pkg.slot_atom)
10762                         if previous_cpv:
10763                                 previous_cpv = previous_cpv.pop()
10764                                 pkg_to_replace = self._pkg(previous_cpv,
10765                                         "installed", pkg.root_config, installed=True)
10766
10767                 task = MergeListItem(args_set=self._args_set,
10768                         background=self._background, binpkg_opts=self._binpkg_opts,
10769                         build_opts=self._build_opts,
10770                         config_pool=self._ConfigPool(pkg.root,
10771                         self._allocate_config, self._deallocate_config),
10772                         emerge_opts=self.myopts,
10773                         find_blockers=self._find_blockers(pkg), logger=self._logger,
10774                         mtimedb=self._mtimedb, pkg=pkg, pkg_count=self._pkg_count.copy(),
10775                         pkg_to_replace=pkg_to_replace,
10776                         prefetcher=self._prefetchers.get(pkg),
10777                         scheduler=self._sched_iface,
10778                         settings=self._allocate_config(pkg.root),
10779                         statusMessage=self._status_msg,
10780                         world_atom=self._world_atom)
10781
10782                 return task
10783
10784         def _failed_pkg_msg(self, failed_pkg, action, preposition):
10785                 pkg = failed_pkg.pkg
10786                 msg = "%s to %s %s" % \
10787                         (bad("Failed"), action, colorize("INFORM", pkg.cpv))
10788                 if pkg.root != "/":
10789                         msg += " %s %s" % (preposition, pkg.root)
10790
10791                 log_path = self._locate_failure_log(failed_pkg)
10792                 if log_path is not None:
10793                         msg += ", Log file:"
10794                 self._status_msg(msg)
10795
10796                 if log_path is not None:
10797                         self._status_msg(" '%s'" % (colorize("INFORM", log_path),))
10798
10799         def _status_msg(self, msg):
10800                 """
10801                 Display a brief status message (no newlines) in the status display.
10802                 This is called by tasks to provide feedback to the user. This
10803                 delegates the resposibility of generating \r and \n control characters,
10804                 to guarantee that lines are created or erased when necessary and
10805                 appropriate.
10806
10807                 @type msg: str
10808                 @param msg: a brief status message (no newlines allowed)
10809                 """
10810                 if not self._background:
10811                         writemsg_level("\n")
10812                 self._status_display.displayMessage(msg)
10813
10814         def _save_resume_list(self):
10815                 """
10816                 Do this before verifying the ebuild Manifests since it might
10817                 be possible for the user to use --resume --skipfirst get past
10818                 a non-essential package with a broken digest.
10819                 """
10820                 mtimedb = self._mtimedb
10821                 mtimedb["resume"]["mergelist"] = [list(x) \
10822                         for x in self._mergelist \
10823                         if isinstance(x, Package) and x.operation == "merge"]
10824
10825                 mtimedb.commit()
10826
10827         def _calc_resume_list(self):
10828                 """
10829                 Use the current resume list to calculate a new one,
10830                 dropping any packages with unsatisfied deps.
10831                 @rtype: bool
10832                 @returns: True if successful, False otherwise.
10833                 """
10834                 print colorize("GOOD", "*** Resuming merge...")
10835
10836                 if self._show_list():
10837                         if "--tree" in self.myopts:
10838                                 portage.writemsg_stdout("\n" + \
10839                                         darkgreen("These are the packages that " + \
10840                                         "would be merged, in reverse order:\n\n"))
10841
10842                         else:
10843                                 portage.writemsg_stdout("\n" + \
10844                                         darkgreen("These are the packages that " + \
10845                                         "would be merged, in order:\n\n"))
10846
10847                 show_spinner = "--quiet" not in self.myopts and \
10848                         "--nodeps" not in self.myopts
10849
10850                 if show_spinner:
10851                         print "Calculating dependencies  ",
10852
10853                 myparams = create_depgraph_params(self.myopts, None)
10854                 success = False
10855                 e = None
10856                 try:
10857                         success, mydepgraph, dropped_tasks = resume_depgraph(
10858                                 self.settings, self.trees, self._mtimedb, self.myopts,
10859                                 myparams, self._spinner, skip_unsatisfied=True)
10860                 except depgraph.UnsatisfiedResumeDep, e:
10861                         mydepgraph = e.depgraph
10862                         dropped_tasks = set()
10863
10864                 if show_spinner:
10865                         print "\b\b... done!"
10866
10867                 if e is not None:
10868                         def unsatisfied_resume_dep_msg():
10869                                 mydepgraph.display_problems()
10870                                 out = portage.output.EOutput()
10871                                 out.eerror("One or more packages are either masked or " + \
10872                                         "have missing dependencies:")
10873                                 out.eerror("")
10874                                 indent = "  "
10875                                 show_parents = set()
10876                                 for dep in e.value:
10877                                         if dep.parent in show_parents:
10878                                                 continue
10879                                         show_parents.add(dep.parent)
10880                                         if dep.atom is None:
10881                                                 out.eerror(indent + "Masked package:")
10882                                                 out.eerror(2 * indent + str(dep.parent))
10883                                                 out.eerror("")
10884                                         else:
10885                                                 out.eerror(indent + str(dep.atom) + " pulled in by:")
10886                                                 out.eerror(2 * indent + str(dep.parent))
10887                                                 out.eerror("")
10888                                 msg = "The resume list contains packages " + \
10889                                         "that are either masked or have " + \
10890                                         "unsatisfied dependencies. " + \
10891                                         "Please restart/continue " + \
10892                                         "the operation manually, or use --skipfirst " + \
10893                                         "to skip the first package in the list and " + \
10894                                         "any other packages that may be " + \
10895                                         "masked or have missing dependencies."
10896                                 for line in textwrap.wrap(msg, 72):
10897                                         out.eerror(line)
10898                         self._post_mod_echo_msgs.append(unsatisfied_resume_dep_msg)
10899                         return False
10900
10901                 if success and self._show_list():
10902                         mylist = mydepgraph.altlist()
10903                         if mylist:
10904                                 if "--tree" in self.myopts:
10905                                         mylist.reverse()
10906                                 mydepgraph.display(mylist, favorites=self._favorites)
10907
10908                 if not success:
10909                         self._post_mod_echo_msgs.append(mydepgraph.display_problems)
10910                         return False
10911                 mydepgraph.display_problems()
10912
10913                 mylist = mydepgraph.altlist()
10914                 mydepgraph.break_refs(mylist)
10915                 mydepgraph.break_refs(dropped_tasks)
10916                 self._mergelist = mylist
10917                 self._set_digraph(mydepgraph.schedulerGraph())
10918
10919                 msg_width = 75
10920                 for task in dropped_tasks:
10921                         if not (isinstance(task, Package) and task.operation == "merge"):
10922                                 continue
10923                         pkg = task
10924                         msg = "emerge --keep-going:" + \
10925                                 " %s" % (pkg.cpv,)
10926                         if pkg.root != "/":
10927                                 msg += " for %s" % (pkg.root,)
10928                         msg += " dropped due to unsatisfied dependency."
10929                         for line in textwrap.wrap(msg, msg_width):
10930                                 eerror(line, phase="other", key=pkg.cpv)
10931                         settings = self.pkgsettings[pkg.root]
10932                         # Ensure that log collection from $T is disabled inside
10933                         # elog_process(), since any logs that might exist are
10934                         # not valid here.
10935                         settings.pop("T", None)
10936                         portage.elog.elog_process(pkg.cpv, settings)
10937                         self._failed_pkgs_all.append(self._failed_pkg(pkg=pkg))
10938
10939                 return True
10940
10941         def _show_list(self):
10942                 myopts = self.myopts
10943                 if "--quiet" not in myopts and \
10944                         ("--ask" in myopts or "--tree" in myopts or \
10945                         "--verbose" in myopts):
10946                         return True
10947                 return False
10948
10949         def _world_atom(self, pkg):
10950                 """
10951                 Add the package to the world file, but only if
10952                 it's supposed to be added. Otherwise, do nothing.
10953                 """
10954
10955                 if set(("--buildpkgonly", "--fetchonly",
10956                         "--fetch-all-uri",
10957                         "--oneshot", "--onlydeps",
10958                         "--pretend")).intersection(self.myopts):
10959                         return
10960
10961                 if pkg.root != self.target_root:
10962                         return
10963
10964                 args_set = self._args_set
10965                 if not args_set.findAtomForPackage(pkg):
10966                         return
10967
10968                 logger = self._logger
10969                 pkg_count = self._pkg_count
10970                 root_config = pkg.root_config
10971                 world_set = root_config.sets["world"]
10972                 world_locked = False
10973                 if hasattr(world_set, "lock"):
10974                         world_set.lock()
10975                         world_locked = True
10976
10977                 try:
10978                         if hasattr(world_set, "load"):
10979                                 world_set.load() # maybe it's changed on disk
10980
10981                         atom = create_world_atom(pkg, args_set, root_config)
10982                         if atom:
10983                                 if hasattr(world_set, "add"):
10984                                         self._status_msg(('Recording %s in "world" ' + \
10985                                                 'favorites file...') % atom)
10986                                         logger.log(" === (%s of %s) Updating world file (%s)" % \
10987                                                 (pkg_count.curval, pkg_count.maxval, pkg.cpv))
10988                                         world_set.add(atom)
10989                                 else:
10990                                         writemsg_level('\n!!! Unable to record %s in "world"\n' % \
10991                                                 (atom,), level=logging.WARN, noiselevel=-1)
10992                 finally:
10993                         if world_locked:
10994                                 world_set.unlock()
10995
10996         def _pkg(self, cpv, type_name, root_config, installed=False):
10997                 """
10998                 Get a package instance from the cache, or create a new
10999                 one if necessary. Raises KeyError from aux_get if it
11000                 failures for some reason (package does not exist or is
11001                 corrupt).
11002                 """
11003                 operation = "merge"
11004                 if installed:
11005                         operation = "nomerge"
11006
11007                 if self._digraph is not None:
11008                         # Reuse existing instance when available.
11009                         pkg = self._digraph.get(
11010                                 (type_name, root_config.root, cpv, operation))
11011                         if pkg is not None:
11012                                 return pkg
11013
11014                 tree_type = depgraph.pkg_tree_map[type_name]
11015                 db = root_config.trees[tree_type].dbapi
11016                 db_keys = list(self.trees[root_config.root][
11017                         tree_type].dbapi._aux_cache_keys)
11018                 metadata = izip(db_keys, db.aux_get(cpv, db_keys))
11019                 pkg = Package(cpv=cpv, metadata=metadata,
11020                         root_config=root_config, installed=installed)
11021                 if type_name == "ebuild":
11022                         settings = self.pkgsettings[root_config.root]
11023                         settings.setcpv(pkg)
11024                         pkg.metadata["USE"] = settings["PORTAGE_USE"]
11025
11026                 return pkg
11027
11028 class MetadataRegen(PollScheduler):
11029
11030         def __init__(self, portdb, max_jobs=None, max_load=None):
11031                 PollScheduler.__init__(self)
11032                 self._portdb = portdb
11033
11034                 if max_jobs is None:
11035                         max_jobs = 1
11036
11037                 self._max_jobs = max_jobs
11038                 self._max_load = max_load
11039                 self._sched_iface = self._sched_iface_class(
11040                         register=self._register,
11041                         schedule=self._schedule_wait,
11042                         unregister=self._unregister)
11043
11044                 self._valid_pkgs = set()
11045                 self._process_iter = self._iter_metadata_processes()
11046
11047         def _iter_metadata_processes(self):
11048                 portdb = self._portdb
11049                 valid_pkgs = self._valid_pkgs
11050                 every_cp = portdb.cp_all()
11051                 every_cp.sort(reverse=True)
11052
11053                 while every_cp:
11054                         cp = every_cp.pop()
11055                         portage.writemsg_stdout("Processing %s\n" % cp)
11056                         cpv_list = portdb.cp_list(cp)
11057                         for cpv in cpv_list:
11058                                 valid_pkgs.add(cpv)
11059                                 ebuild_path, repo_path = portdb.findname2(cpv)
11060                                 metadata_process = portdb._metadata_process(
11061                                         cpv, ebuild_path, repo_path)
11062                                 if metadata_process is None:
11063                                         continue
11064                                 yield metadata_process
11065
11066         def run(self):
11067
11068                 portdb = self._portdb
11069                 from portage.cache.cache_errors import CacheError
11070                 dead_nodes = {}
11071
11072                 for mytree in portdb.porttrees:
11073                         try:
11074                                 dead_nodes[mytree] = set(portdb.auxdb[mytree].iterkeys())
11075                         except CacheError, e:
11076                                 portage.writemsg("Error listing cache entries for " + \
11077                                         "'%s': %s, continuing...\n" % (mytree, e), noiselevel=-1)
11078                                 del e
11079                                 dead_nodes = None
11080                                 break
11081
11082                 while self._schedule():
11083                         self._poll_loop()
11084
11085                 while self._jobs:
11086                         self._poll_loop()
11087
11088                 if dead_nodes:
11089                         for y in self._valid_pkgs:
11090                                 for mytree in portdb.porttrees:
11091                                         if portdb.findname2(y, mytree=mytree)[0]:
11092                                                 dead_nodes[mytree].discard(y)
11093
11094                         for mytree, nodes in dead_nodes.iteritems():
11095                                 auxdb = portdb.auxdb[mytree]
11096                                 for y in nodes:
11097                                         try:
11098                                                 del auxdb[y]
11099                                         except (KeyError, CacheError):
11100                                                 pass
11101
11102         def _schedule_tasks(self):
11103                 """
11104                 @rtype: bool
11105                 @returns: True if there may be remaining tasks to schedule,
11106                         False otherwise.
11107                 """
11108                 while self._can_add_job():
11109                         try:
11110                                 metadata_process = self._process_iter.next()
11111                         except StopIteration:
11112                                 return False
11113
11114                         self._jobs += 1
11115                         metadata_process.scheduler = self._sched_iface
11116                         metadata_process.addExitListener(self._metadata_exit)
11117                         metadata_process.start()
11118                 return True
11119
11120         def _metadata_exit(self, metadata_process):
11121                 self._jobs -= 1
11122                 if metadata_process.returncode != os.EX_OK:
11123                         self._valid_pkgs.discard(metadata_process.cpv)
11124                         portage.writemsg("Error processing %s, continuing...\n" % \
11125                                 (metadata_process.cpv,))
11126                 self._schedule()
11127
11128 class UninstallFailure(portage.exception.PortageException):
11129         """
11130         An instance of this class is raised by unmerge() when
11131         an uninstallation fails.
11132         """
11133         status = 1
11134         def __init__(self, *pargs):
11135                 portage.exception.PortageException.__init__(self, pargs)
11136                 if pargs:
11137                         self.status = pargs[0]
11138
11139 def unmerge(root_config, myopts, unmerge_action,
11140         unmerge_files, ldpath_mtimes, autoclean=0,
11141         clean_world=1, clean_delay=1, ordered=0, raise_on_error=0,
11142         scheduler=None, writemsg_level=portage.util.writemsg_level):
11143
11144         quiet = "--quiet" in myopts
11145         settings = root_config.settings
11146         sets = root_config.sets
11147         vartree = root_config.trees["vartree"]
11148         candidate_catpkgs=[]
11149         global_unmerge=0
11150         xterm_titles = "notitles" not in settings.features
11151         out = portage.output.EOutput()
11152         pkg_cache = {}
11153         db_keys = list(vartree.dbapi._aux_cache_keys)
11154
11155         def _pkg(cpv):
11156                 pkg = pkg_cache.get(cpv)
11157                 if pkg is None:
11158                         pkg = Package(cpv=cpv, installed=True,
11159                                 metadata=izip(db_keys, vartree.dbapi.aux_get(cpv, db_keys)),
11160                                 root_config=root_config,
11161                                 type_name="installed")
11162                         pkg_cache[cpv] = pkg
11163                 return pkg
11164
11165         vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
11166         try:
11167                 # At least the parent needs to exist for the lock file.
11168                 portage.util.ensure_dirs(vdb_path)
11169         except portage.exception.PortageException:
11170                 pass
11171         vdb_lock = None
11172         try:
11173                 if os.access(vdb_path, os.W_OK):
11174                         vdb_lock = portage.locks.lockdir(vdb_path)
11175                 realsyslist = sets["system"].getAtoms()
11176                 syslist = []
11177                 for x in realsyslist:
11178                         mycp = portage.dep_getkey(x)
11179                         if mycp in settings.getvirtuals():
11180                                 providers = []
11181                                 for provider in settings.getvirtuals()[mycp]:
11182                                         if vartree.dbapi.match(provider):
11183                                                 providers.append(provider)
11184                                 if len(providers) == 1:
11185                                         syslist.extend(providers)
11186                         else:
11187                                 syslist.append(mycp)
11188         
11189                 mysettings = portage.config(clone=settings)
11190         
11191                 if not unmerge_files:
11192                         if unmerge_action == "unmerge":
11193                                 print
11194                                 print bold("emerge unmerge") + " can only be used with specific package names"
11195                                 print
11196                                 return 0
11197                         else:
11198                                 global_unmerge = 1
11199         
11200                 localtree = vartree
11201                 # process all arguments and add all
11202                 # valid db entries to candidate_catpkgs
11203                 if global_unmerge:
11204                         if not unmerge_files:
11205                                 candidate_catpkgs.extend(vartree.dbapi.cp_all())
11206                 else:
11207                         #we've got command-line arguments
11208                         if not unmerge_files:
11209                                 print "\nNo packages to unmerge have been provided.\n"
11210                                 return 0
11211                         for x in unmerge_files:
11212                                 arg_parts = x.split('/')
11213                                 if x[0] not in [".","/"] and \
11214                                         arg_parts[-1][-7:] != ".ebuild":
11215                                         #possible cat/pkg or dep; treat as such
11216                                         candidate_catpkgs.append(x)
11217                                 elif unmerge_action in ["prune","clean"]:
11218                                         print "\n!!! Prune and clean do not accept individual" + \
11219                                                 " ebuilds as arguments;\n    skipping.\n"
11220                                         continue
11221                                 else:
11222                                         # it appears that the user is specifying an installed
11223                                         # ebuild and we're in "unmerge" mode, so it's ok.
11224                                         if not os.path.exists(x):
11225                                                 print "\n!!! The path '"+x+"' doesn't exist.\n"
11226                                                 return 0
11227         
11228                                         absx   = os.path.abspath(x)
11229                                         sp_absx = absx.split("/")
11230                                         if sp_absx[-1][-7:] == ".ebuild":
11231                                                 del sp_absx[-1]
11232                                                 absx = "/".join(sp_absx)
11233         
11234                                         sp_absx_len = len(sp_absx)
11235         
11236                                         vdb_path = os.path.join(settings["ROOT"], portage.VDB_PATH)
11237                                         vdb_len  = len(vdb_path)
11238         
11239                                         sp_vdb     = vdb_path.split("/")
11240                                         sp_vdb_len = len(sp_vdb)
11241         
11242                                         if not os.path.exists(absx+"/CONTENTS"):
11243                                                 print "!!! Not a valid db dir: "+str(absx)
11244                                                 return 0
11245         
11246                                         if sp_absx_len <= sp_vdb_len:
11247                                                 # The Path is shorter... so it can't be inside the vdb.
11248                                                 print sp_absx
11249                                                 print absx
11250                                                 print "\n!!!",x,"cannot be inside "+ \
11251                                                         vdb_path+"; aborting.\n"
11252                                                 return 0
11253         
11254                                         for idx in range(0,sp_vdb_len):
11255                                                 if idx >= sp_absx_len or sp_vdb[idx] != sp_absx[idx]:
11256                                                         print sp_absx
11257                                                         print absx
11258                                                         print "\n!!!", x, "is not inside "+\
11259                                                                 vdb_path+"; aborting.\n"
11260                                                         return 0
11261         
11262                                         print "="+"/".join(sp_absx[sp_vdb_len:])
11263                                         candidate_catpkgs.append(
11264                                                 "="+"/".join(sp_absx[sp_vdb_len:]))
11265         
11266                 newline=""
11267                 if (not "--quiet" in myopts):
11268                         newline="\n"
11269                 if settings["ROOT"] != "/":
11270                         writemsg_level(darkgreen(newline+ \
11271                                 ">>> Using system located in ROOT tree %s\n" % \
11272                                 settings["ROOT"]))
11273
11274                 if (("--pretend" in myopts) or ("--ask" in myopts)) and \
11275                         not ("--quiet" in myopts):
11276                         writemsg_level(darkgreen(newline+\
11277                                 ">>> These are the packages that would be unmerged:\n"))
11278
11279                 # Preservation of order is required for --depclean and --prune so
11280                 # that dependencies are respected. Use all_selected to eliminate
11281                 # duplicate packages since the same package may be selected by
11282                 # multiple atoms.
11283                 pkgmap = []
11284                 all_selected = set()
11285                 for x in candidate_catpkgs:
11286                         # cycle through all our candidate deps and determine
11287                         # what will and will not get unmerged
11288                         try:
11289                                 mymatch = vartree.dbapi.match(x)
11290                         except portage.exception.AmbiguousPackageName, errpkgs:
11291                                 print "\n\n!!! The short ebuild name \"" + \
11292                                         x + "\" is ambiguous.  Please specify"
11293                                 print "!!! one of the following fully-qualified " + \
11294                                         "ebuild names instead:\n"
11295                                 for i in errpkgs[0]:
11296                                         print "    " + green(i)
11297                                 print
11298                                 sys.exit(1)
11299         
11300                         if not mymatch and x[0] not in "<>=~":
11301                                 mymatch = localtree.dep_match(x)
11302                         if not mymatch:
11303                                 portage.writemsg("\n--- Couldn't find '%s' to %s.\n" % \
11304                                         (x, unmerge_action), noiselevel=-1)
11305                                 continue
11306
11307                         pkgmap.append(
11308                                 {"protected": set(), "selected": set(), "omitted": set()})
11309                         mykey = len(pkgmap) - 1
11310                         if unmerge_action=="unmerge":
11311                                         for y in mymatch:
11312                                                 if y not in all_selected:
11313                                                         pkgmap[mykey]["selected"].add(y)
11314                                                         all_selected.add(y)
11315                         elif unmerge_action == "prune":
11316                                 if len(mymatch) == 1:
11317                                         continue
11318                                 best_version = mymatch[0]
11319                                 best_slot = vartree.getslot(best_version)
11320                                 best_counter = vartree.dbapi.cpv_counter(best_version)
11321                                 for mypkg in mymatch[1:]:
11322                                         myslot = vartree.getslot(mypkg)
11323                                         mycounter = vartree.dbapi.cpv_counter(mypkg)
11324                                         if (myslot == best_slot and mycounter > best_counter) or \
11325                                                 mypkg == portage.best([mypkg, best_version]):
11326                                                 if myslot == best_slot:
11327                                                         if mycounter < best_counter:
11328                                                                 # On slot collision, keep the one with the
11329                                                                 # highest counter since it is the most
11330                                                                 # recently installed.
11331                                                                 continue
11332                                                 best_version = mypkg
11333                                                 best_slot = myslot
11334                                                 best_counter = mycounter
11335                                 pkgmap[mykey]["protected"].add(best_version)
11336                                 pkgmap[mykey]["selected"].update(mypkg for mypkg in mymatch \
11337                                         if mypkg != best_version and mypkg not in all_selected)
11338                                 all_selected.update(pkgmap[mykey]["selected"])
11339                         else:
11340                                 # unmerge_action == "clean"
11341                                 slotmap={}
11342                                 for mypkg in mymatch:
11343                                         if unmerge_action == "clean":
11344                                                 myslot = localtree.getslot(mypkg)
11345                                         else:
11346                                                 # since we're pruning, we don't care about slots
11347                                                 # and put all the pkgs in together
11348                                                 myslot = 0
11349                                         if myslot not in slotmap:
11350                                                 slotmap[myslot] = {}
11351                                         slotmap[myslot][localtree.dbapi.cpv_counter(mypkg)] = mypkg
11352
11353                                 for mypkg in vartree.dbapi.cp_list(
11354                                         portage.dep_getkey(mymatch[0])):
11355                                         myslot = vartree.getslot(mypkg)
11356                                         if myslot not in slotmap:
11357                                                 slotmap[myslot] = {}
11358                                         slotmap[myslot][vartree.dbapi.cpv_counter(mypkg)] = mypkg
11359
11360                                 for myslot in slotmap:
11361                                         counterkeys = slotmap[myslot].keys()
11362                                         if not counterkeys:
11363                                                 continue
11364                                         counterkeys.sort()
11365                                         pkgmap[mykey]["protected"].add(
11366                                                 slotmap[myslot][counterkeys[-1]])
11367                                         del counterkeys[-1]
11368
11369                                         for counter in counterkeys[:]:
11370                                                 mypkg = slotmap[myslot][counter]
11371                                                 if mypkg not in mymatch:
11372                                                         counterkeys.remove(counter)
11373                                                         pkgmap[mykey]["protected"].add(
11374                                                                 slotmap[myslot][counter])
11375
11376                                         #be pretty and get them in order of merge:
11377                                         for ckey in counterkeys:
11378                                                 mypkg = slotmap[myslot][ckey]
11379                                                 if mypkg not in all_selected:
11380                                                         pkgmap[mykey]["selected"].add(mypkg)
11381                                                         all_selected.add(mypkg)
11382                                         # ok, now the last-merged package
11383                                         # is protected, and the rest are selected
11384                 numselected = len(all_selected)
11385                 if global_unmerge and not numselected:
11386                         portage.writemsg_stdout("\n>>> No outdated packages were found on your system.\n")
11387                         return 0
11388         
11389                 if not numselected:
11390                         portage.writemsg_stdout(
11391                                 "\n>>> No packages selected for removal by " + \
11392                                 unmerge_action + "\n")
11393                         return 0
11394         finally:
11395                 if vdb_lock:
11396                         vartree.dbapi.flush_cache()
11397                         portage.locks.unlockdir(vdb_lock)
11398         
11399         from portage.sets.base import EditablePackageSet
11400         
11401         # generate a list of package sets that are directly or indirectly listed in "world",
11402         # as there is no persistent list of "installed" sets
11403         installed_sets = ["world"]
11404         stop = False
11405         pos = 0
11406         while not stop:
11407                 stop = True
11408                 pos = len(installed_sets)
11409                 for s in installed_sets[pos - 1:]:
11410                         if s not in sets:
11411                                 continue
11412                         candidates = [x[len(SETPREFIX):] for x in sets[s].getNonAtoms() if x.startswith(SETPREFIX)]
11413                         if candidates:
11414                                 stop = False
11415                                 installed_sets += candidates
11416         installed_sets = [x for x in installed_sets if x not in root_config.setconfig.active]
11417         del stop, pos
11418
11419         # we don't want to unmerge packages that are still listed in user-editable package sets
11420         # listed in "world" as they would be remerged on the next update of "world" or the 
11421         # relevant package sets.
11422         unknown_sets = set()
11423         for cp in xrange(len(pkgmap)):
11424                 for cpv in pkgmap[cp]["selected"].copy():
11425                         try:
11426                                 pkg = _pkg(cpv)
11427                         except KeyError:
11428                                 # It could have been uninstalled
11429                                 # by a concurrent process.
11430                                 continue
11431
11432                         if unmerge_action != "clean" and \
11433                                 root_config.root == "/" and \
11434                                 portage.match_from_list(
11435                                 portage.const.PORTAGE_PACKAGE_ATOM, [pkg]):
11436                                 msg = ("Not unmerging package %s since there is no valid " + \
11437                                 "reason for portage to unmerge itself.") % (pkg.cpv,)
11438                                 for line in textwrap.wrap(msg, 75):
11439                                         out.eerror(line)
11440                                 # adjust pkgmap so the display output is correct
11441                                 pkgmap[cp]["selected"].remove(cpv)
11442                                 all_selected.remove(cpv)
11443                                 pkgmap[cp]["protected"].add(cpv)
11444                                 continue
11445
11446                         parents = []
11447                         for s in installed_sets:
11448                                 # skip sets that the user requested to unmerge, and skip world 
11449                                 # unless we're unmerging a package set (as the package would be 
11450                                 # removed from "world" later on)
11451                                 if s in root_config.setconfig.active or (s == "world" and not root_config.setconfig.active):
11452                                         continue
11453
11454                                 if s not in sets:
11455                                         if s in unknown_sets:
11456                                                 continue
11457                                         unknown_sets.add(s)
11458                                         out = portage.output.EOutput()
11459                                         out.eerror(("Unknown set '@%s' in " + \
11460                                                 "%svar/lib/portage/world_sets") % \
11461                                                 (s, root_config.root))
11462                                         continue
11463
11464                                 # only check instances of EditablePackageSet as other classes are generally used for
11465                                 # special purposes and can be ignored here (and are usually generated dynamically, so the
11466                                 # user can't do much about them anyway)
11467                                 if isinstance(sets[s], EditablePackageSet):
11468
11469                                         # This is derived from a snippet of code in the
11470                                         # depgraph._iter_atoms_for_pkg() method.
11471                                         for atom in sets[s].iterAtomsForPackage(pkg):
11472                                                 inst_matches = vartree.dbapi.match(atom)
11473                                                 inst_matches.reverse() # descending order
11474                                                 higher_slot = None
11475                                                 for inst_cpv in inst_matches:
11476                                                         try:
11477                                                                 inst_pkg = _pkg(inst_cpv)
11478                                                         except KeyError:
11479                                                                 # It could have been uninstalled
11480                                                                 # by a concurrent process.
11481                                                                 continue
11482
11483                                                         if inst_pkg.cp != atom.cp:
11484                                                                 continue
11485                                                         if pkg >= inst_pkg:
11486                                                                 # This is descending order, and we're not
11487                                                                 # interested in any versions <= pkg given.
11488                                                                 break
11489                                                         if pkg.slot_atom != inst_pkg.slot_atom:
11490                                                                 higher_slot = inst_pkg
11491                                                                 break
11492                                                 if higher_slot is None:
11493                                                         parents.append(s)
11494                                                         break
11495                         if parents:
11496                                 #print colorize("WARN", "Package %s is going to be unmerged," % cpv)
11497                                 #print colorize("WARN", "but still listed in the following package sets:")
11498                                 #print "    %s\n" % ", ".join(parents)
11499                                 print colorize("WARN", "Not unmerging package %s as it is" % cpv)
11500                                 print colorize("WARN", "still referenced by the following package sets:")
11501                                 print "    %s\n" % ", ".join(parents)
11502                                 # adjust pkgmap so the display output is correct
11503                                 pkgmap[cp]["selected"].remove(cpv)
11504                                 all_selected.remove(cpv)
11505                                 pkgmap[cp]["protected"].add(cpv)
11506         
11507         del installed_sets
11508
11509         numselected = len(all_selected)
11510         if not numselected:
11511                 writemsg_level(
11512                         "\n>>> No packages selected for removal by " + \
11513                         unmerge_action + "\n")
11514                 return 0
11515
11516         # Unmerge order only matters in some cases
11517         if not ordered:
11518                 unordered = {}
11519                 for d in pkgmap:
11520                         selected = d["selected"]
11521                         if not selected:
11522                                 continue
11523                         cp = portage.cpv_getkey(iter(selected).next())
11524                         cp_dict = unordered.get(cp)
11525                         if cp_dict is None:
11526                                 cp_dict = {}
11527                                 unordered[cp] = cp_dict
11528                                 for k in d:
11529                                         cp_dict[k] = set()
11530                         for k, v in d.iteritems():
11531                                 cp_dict[k].update(v)
11532                 pkgmap = [unordered[cp] for cp in sorted(unordered)]
11533
11534         for x in xrange(len(pkgmap)):
11535                 selected = pkgmap[x]["selected"]
11536                 if not selected:
11537                         continue
11538                 for mytype, mylist in pkgmap[x].iteritems():
11539                         if mytype == "selected":
11540                                 continue
11541                         mylist.difference_update(all_selected)
11542                 cp = portage.cpv_getkey(iter(selected).next())
11543                 for y in localtree.dep_match(cp):
11544                         if y not in pkgmap[x]["omitted"] and \
11545                                 y not in pkgmap[x]["selected"] and \
11546                                 y not in pkgmap[x]["protected"] and \
11547                                 y not in all_selected:
11548                                 pkgmap[x]["omitted"].add(y)
11549                 if global_unmerge and not pkgmap[x]["selected"]:
11550                         #avoid cluttering the preview printout with stuff that isn't getting unmerged
11551                         continue
11552                 if not (pkgmap[x]["protected"] or pkgmap[x]["omitted"]) and cp in syslist:
11553                         writemsg_level(colorize("BAD","\a\n\n!!! " + \
11554                                 "'%s' is part of your system profile.\n" % cp),
11555                                 level=logging.WARNING, noiselevel=-1)
11556                         writemsg_level(colorize("WARN","\a!!! Unmerging it may " + \
11557                                 "be damaging to your system.\n\n"),
11558                                 level=logging.WARNING, noiselevel=-1)
11559                         if clean_delay and "--pretend" not in myopts and "--ask" not in myopts:
11560                                 countdown(int(settings["EMERGE_WARNING_DELAY"]),
11561                                         colorize("UNMERGE_WARN", "Press Ctrl-C to Stop"))
11562                 if not quiet:
11563                         writemsg_level("\n %s\n" % (bold(cp),), noiselevel=-1)
11564                 else:
11565                         writemsg_level(bold(cp) + ": ", noiselevel=-1)
11566                 for mytype in ["selected","protected","omitted"]:
11567                         if not quiet:
11568                                 writemsg_level((mytype + ": ").rjust(14), noiselevel=-1)
11569                         if pkgmap[x][mytype]:
11570                                 sorted_pkgs = [portage.catpkgsplit(mypkg)[1:] for mypkg in pkgmap[x][mytype]]
11571                                 sorted_pkgs.sort(portage.pkgcmp)
11572                                 for pn, ver, rev in sorted_pkgs:
11573                                         if rev == "r0":
11574                                                 myversion = ver
11575                                         else:
11576                                                 myversion = ver + "-" + rev
11577                                         if mytype == "selected":
11578                                                 writemsg_level(
11579                                                         colorize("UNMERGE_WARN", myversion + " "),
11580                                                         noiselevel=-1)
11581                                         else:
11582                                                 writemsg_level(
11583                                                         colorize("GOOD", myversion + " "), noiselevel=-1)
11584                         else:
11585                                 writemsg_level("none ", noiselevel=-1)
11586                         if not quiet:
11587                                 writemsg_level("\n", noiselevel=-1)
11588                 if quiet:
11589                         writemsg_level("\n", noiselevel=-1)
11590
11591         writemsg_level("\n>>> " + colorize("UNMERGE_WARN", "'Selected'") + \
11592                 " packages are slated for removal.\n")
11593         writemsg_level(">>> " + colorize("GOOD", "'Protected'") + \
11594                         " and " + colorize("GOOD", "'omitted'") + \
11595                         " packages will not be removed.\n\n")
11596
11597         if "--pretend" in myopts:
11598                 #we're done... return
11599                 return 0
11600         if "--ask" in myopts:
11601                 if userquery("Would you like to unmerge these packages?")=="No":
11602                         # enter pretend mode for correct formatting of results
11603                         myopts["--pretend"] = True
11604                         print
11605                         print "Quitting."
11606                         print
11607                         return 0
11608         #the real unmerging begins, after a short delay....
11609         if clean_delay and not autoclean:
11610                 countdown(int(settings["CLEAN_DELAY"]), ">>> Unmerging")
11611
11612         for x in xrange(len(pkgmap)):
11613                 for y in pkgmap[x]["selected"]:
11614                         writemsg_level(">>> Unmerging "+y+"...\n", noiselevel=-1)
11615                         emergelog(xterm_titles, "=== Unmerging... ("+y+")")
11616                         mysplit = y.split("/")
11617                         #unmerge...
11618                         retval = portage.unmerge(mysplit[0], mysplit[1], settings["ROOT"],
11619                                 mysettings, unmerge_action not in ["clean","prune"],
11620                                 vartree=vartree, ldpath_mtimes=ldpath_mtimes,
11621                                 scheduler=scheduler)
11622
11623                         if retval != os.EX_OK:
11624                                 emergelog(xterm_titles, " !!! unmerge FAILURE: "+y)
11625                                 if raise_on_error:
11626                                         raise UninstallFailure(retval)
11627                                 sys.exit(retval)
11628                         else:
11629                                 if clean_world and hasattr(sets["world"], "cleanPackage"):
11630                                         sets["world"].cleanPackage(vartree.dbapi, y)
11631                                 emergelog(xterm_titles, " >>> unmerge success: "+y)
11632         if clean_world and hasattr(sets["world"], "remove"):
11633                 for s in root_config.setconfig.active:
11634                         sets["world"].remove(SETPREFIX+s)
11635         return 1
11636
11637 def chk_updated_info_files(root, infodirs, prev_mtimes, retval):
11638
11639         if os.path.exists("/usr/bin/install-info"):
11640                 out = portage.output.EOutput()
11641                 regen_infodirs=[]
11642                 for z in infodirs:
11643                         if z=='':
11644                                 continue
11645                         inforoot=normpath(root+z)
11646                         if os.path.isdir(inforoot):
11647                                 infomtime = long(os.stat(inforoot).st_mtime)
11648                                 if inforoot not in prev_mtimes or \
11649                                         prev_mtimes[inforoot] != infomtime:
11650                                                 regen_infodirs.append(inforoot)
11651
11652                 if not regen_infodirs:
11653                         portage.writemsg_stdout("\n")
11654                         out.einfo("GNU info directory index is up-to-date.")
11655                 else:
11656                         portage.writemsg_stdout("\n")
11657                         out.einfo("Regenerating GNU info directory index...")
11658
11659                         dir_extensions = ("", ".gz", ".bz2")
11660                         icount=0
11661                         badcount=0
11662                         errmsg = ""
11663                         for inforoot in regen_infodirs:
11664                                 if inforoot=='':
11665                                         continue
11666
11667                                 if not os.path.isdir(inforoot) or \
11668                                         not os.access(inforoot, os.W_OK):
11669                                         continue
11670
11671                                 file_list = os.listdir(inforoot)
11672                                 file_list.sort()
11673                                 dir_file = os.path.join(inforoot, "dir")
11674                                 moved_old_dir = False
11675                                 processed_count = 0
11676                                 for x in file_list:
11677                                         if x.startswith(".") or \
11678                                                 os.path.isdir(os.path.join(inforoot, x)):
11679                                                 continue
11680                                         if x.startswith("dir"):
11681                                                 skip = False
11682                                                 for ext in dir_extensions:
11683                                                         if x == "dir" + ext or \
11684                                                                 x == "dir" + ext + ".old":
11685                                                                 skip = True
11686                                                                 break
11687                                                 if skip:
11688                                                         continue
11689                                         if processed_count == 0:
11690                                                 for ext in dir_extensions:
11691                                                         try:
11692                                                                 os.rename(dir_file + ext, dir_file + ext + ".old")
11693                                                                 moved_old_dir = True
11694                                                         except EnvironmentError, e:
11695                                                                 if e.errno != errno.ENOENT:
11696                                                                         raise
11697                                                                 del e
11698                                         processed_count += 1
11699                                         myso=commands.getstatusoutput("LANG=C LANGUAGE=C /usr/bin/install-info --dir-file="+inforoot+"/dir "+inforoot+"/"+x)[1]
11700                                         existsstr="already exists, for file `"
11701                                         if myso!="":
11702                                                 if re.search(existsstr,myso):
11703                                                         # Already exists... Don't increment the count for this.
11704                                                         pass
11705                                                 elif myso[:44]=="install-info: warning: no info dir entry in ":
11706                                                         # This info file doesn't contain a DIR-header: install-info produces this
11707                                                         # (harmless) warning (the --quiet switch doesn't seem to work).
11708                                                         # Don't increment the count for this.
11709                                                         pass
11710                                                 else:
11711                                                         badcount=badcount+1
11712                                                         errmsg += myso + "\n"
11713                                         icount=icount+1
11714
11715                                 if moved_old_dir and not os.path.exists(dir_file):
11716                                         # We didn't generate a new dir file, so put the old file
11717                                         # back where it was originally found.
11718                                         for ext in dir_extensions:
11719                                                 try:
11720                                                         os.rename(dir_file + ext + ".old", dir_file + ext)
11721                                                 except EnvironmentError, e:
11722                                                         if e.errno != errno.ENOENT:
11723                                                                 raise
11724                                                         del e
11725
11726                                 # Clean dir.old cruft so that they don't prevent
11727                                 # unmerge of otherwise empty directories.
11728                                 for ext in dir_extensions:
11729                                         try:
11730                                                 os.unlink(dir_file + ext + ".old")
11731                                         except EnvironmentError, e:
11732                                                 if e.errno != errno.ENOENT:
11733                                                         raise
11734                                                 del e
11735
11736                                 #update mtime so we can potentially avoid regenerating.
11737                                 prev_mtimes[inforoot] = long(os.stat(inforoot).st_mtime)
11738
11739                         if badcount:
11740                                 out.eerror("Processed %d info files; %d errors." % \
11741                                         (icount, badcount))
11742                                 writemsg_level(errmsg, level=logging.ERROR, noiselevel=-1)
11743                         else:
11744                                 if icount > 0:
11745                                         out.einfo("Processed %d info files." % (icount,))
11746
11747
11748 def display_news_notification(root_config, myopts):
11749         target_root = root_config.root
11750         trees = root_config.trees
11751         settings = trees["vartree"].settings
11752         portdb = trees["porttree"].dbapi
11753         vardb = trees["vartree"].dbapi
11754         NEWS_PATH = os.path.join("metadata", "news")
11755         UNREAD_PATH = os.path.join(target_root, NEWS_LIB_PATH, "news")
11756         newsReaderDisplay = False
11757         update = "--pretend" not in myopts
11758
11759         for repo in portdb.getRepositories():
11760                 unreadItems = checkUpdatedNewsItems(
11761                         portdb, vardb, NEWS_PATH, UNREAD_PATH, repo, update=update)
11762                 if unreadItems:
11763                         if not newsReaderDisplay:
11764                                 newsReaderDisplay = True
11765                                 print
11766                         print colorize("WARN", " * IMPORTANT:"),
11767                         print "%s news items need reading for repository '%s'." % (unreadItems, repo)
11768                         
11769         
11770         if newsReaderDisplay:
11771                 print colorize("WARN", " *"),
11772                 print "Use " + colorize("GOOD", "eselect news") + " to read news items."
11773                 print
11774
11775 def display_preserved_libs(vardbapi):
11776         MAX_DISPLAY = 3
11777
11778         # Ensure the registry is consistent with existing files.
11779         vardbapi.plib_registry.pruneNonExisting()
11780
11781         if vardbapi.plib_registry.hasEntries():
11782                 print
11783                 print colorize("WARN", "!!!") + " existing preserved libs:"
11784                 plibdata = vardbapi.plib_registry.getPreservedLibs()
11785                 linkmap = vardbapi.linkmap
11786                 consumer_map = {}
11787                 owners = {}
11788                 linkmap_broken = False
11789
11790                 try:
11791                         linkmap.rebuild()
11792                 except portage.exception.CommandNotFound, e:
11793                         writemsg_level("!!! Command Not Found: %s\n" % (e,),
11794                                 level=logging.ERROR, noiselevel=-1)
11795                         del e
11796                         linkmap_broken = True
11797                 else:
11798                         search_for_owners = set()
11799                         for cpv in plibdata:
11800                                 internal_plib_keys = set(linkmap._obj_key(f) \
11801                                         for f in plibdata[cpv])
11802                                 for f in plibdata[cpv]:
11803                                         if f in consumer_map:
11804                                                 continue
11805                                         consumers = []
11806                                         for c in linkmap.findConsumers(f):
11807                                                 # Filter out any consumers that are also preserved libs
11808                                                 # belonging to the same package as the provider.
11809                                                 if linkmap._obj_key(c) not in internal_plib_keys:
11810                                                         consumers.append(c)
11811                                         consumers.sort()
11812                                         consumer_map[f] = consumers
11813                                         search_for_owners.update(consumers[:MAX_DISPLAY+1])
11814
11815                         owners = vardbapi._owners.getFileOwnerMap(search_for_owners)
11816
11817                 for cpv in plibdata:
11818                         print colorize("WARN", ">>>") + " package: %s" % cpv
11819                         samefile_map = {}
11820                         for f in plibdata[cpv]:
11821                                 obj_key = linkmap._obj_key(f)
11822                                 alt_paths = samefile_map.get(obj_key)
11823                                 if alt_paths is None:
11824                                         alt_paths = set()
11825                                         samefile_map[obj_key] = alt_paths
11826                                 alt_paths.add(f)
11827
11828                         for alt_paths in samefile_map.itervalues():
11829                                 alt_paths = sorted(alt_paths)
11830                                 for p in alt_paths:
11831                                         print colorize("WARN", " * ") + " - %s" % (p,)
11832                                 f = alt_paths[0]
11833                                 consumers = consumer_map.get(f, [])
11834                                 for c in consumers[:MAX_DISPLAY]:
11835                                         print colorize("WARN", " * ") + "     used by %s (%s)" % \
11836                                                 (c, ", ".join(x.mycpv for x in owners.get(c, [])))
11837                                 if len(consumers) == MAX_DISPLAY + 1:
11838                                         print colorize("WARN", " * ") + "     used by %s (%s)" % \
11839                                                 (consumers[MAX_DISPLAY], ", ".join(x.mycpv \
11840                                                 for x in owners.get(consumers[MAX_DISPLAY], [])))
11841                                 elif len(consumers) > MAX_DISPLAY:
11842                                         print colorize("WARN", " * ") + "     used by %d other files" % (len(consumers) - MAX_DISPLAY)
11843                 print "Use " + colorize("GOOD", "emerge @preserved-rebuild") + " to rebuild packages using these libraries"
11844
11845
11846 def _flush_elog_mod_echo():
11847         """
11848         Dump the mod_echo output now so that our other
11849         notifications are shown last.
11850         @rtype: bool
11851         @returns: True if messages were shown, False otherwise.
11852         """
11853         messages_shown = False
11854         try:
11855                 from portage.elog import mod_echo
11856         except ImportError:
11857                 pass # happens during downgrade to a version without the module
11858         else:
11859                 messages_shown = bool(mod_echo._items)
11860                 mod_echo.finalize()
11861         return messages_shown
11862
11863 def post_emerge(root_config, myopts, mtimedb, retval):
11864         """
11865         Misc. things to run at the end of a merge session.
11866         
11867         Update Info Files
11868         Update Config Files
11869         Update News Items
11870         Commit mtimeDB
11871         Display preserved libs warnings
11872         Exit Emerge
11873
11874         @param trees: A dictionary mapping each ROOT to it's package databases
11875         @type trees: dict
11876         @param mtimedb: The mtimeDB to store data needed across merge invocations
11877         @type mtimedb: MtimeDB class instance
11878         @param retval: Emerge's return value
11879         @type retval: Int
11880         @rype: None
11881         @returns:
11882         1.  Calls sys.exit(retval)
11883         """
11884
11885         target_root = root_config.root
11886         trees = { target_root : root_config.trees }
11887         vardbapi = trees[target_root]["vartree"].dbapi
11888         settings = vardbapi.settings
11889         info_mtimes = mtimedb["info"]
11890
11891         # Load the most current variables from ${ROOT}/etc/profile.env
11892         settings.unlock()
11893         settings.reload()
11894         settings.regenerate()
11895         settings.lock()
11896
11897         config_protect = settings.get("CONFIG_PROTECT","").split()
11898         infodirs = settings.get("INFOPATH","").split(":") + \
11899                 settings.get("INFODIR","").split(":")
11900
11901         os.chdir("/")
11902
11903         if retval == os.EX_OK:
11904                 exit_msg = " *** exiting successfully."
11905         else:
11906                 exit_msg = " *** exiting unsuccessfully with status '%s'." % retval
11907         emergelog("notitles" not in settings.features, exit_msg)
11908
11909         _flush_elog_mod_echo()
11910
11911         counter_hash = settings.get("PORTAGE_COUNTER_HASH")
11912         if counter_hash is not None and \
11913                 counter_hash == vardbapi._counter_hash():
11914                 display_news_notification(root_config, myopts)
11915                 # If vdb state has not changed then there's nothing else to do.
11916                 sys.exit(retval)
11917
11918         vdb_path = os.path.join(target_root, portage.VDB_PATH)
11919         portage.util.ensure_dirs(vdb_path)
11920         vdb_lock = None
11921         if os.access(vdb_path, os.W_OK) and not "--pretend" in myopts:
11922                 vdb_lock = portage.locks.lockdir(vdb_path)
11923
11924         if vdb_lock:
11925                 try:
11926                         if "noinfo" not in settings.features:
11927                                 chk_updated_info_files(target_root,
11928                                         infodirs, info_mtimes, retval)
11929                         mtimedb.commit()
11930                 finally:
11931                         if vdb_lock:
11932                                 portage.locks.unlockdir(vdb_lock)
11933
11934         chk_updated_cfg_files(target_root, config_protect)
11935         
11936         display_news_notification(root_config, myopts)
11937         if retval in (None, os.EX_OK) or (not "--pretend" in myopts):
11938                 display_preserved_libs(vardbapi)        
11939
11940         sys.exit(retval)
11941
11942
11943 def chk_updated_cfg_files(target_root, config_protect):
11944         if config_protect:
11945                 #number of directories with some protect files in them
11946                 procount=0
11947                 for x in config_protect:
11948                         x = os.path.join(target_root, x.lstrip(os.path.sep))
11949                         if not os.access(x, os.W_OK):
11950                                 # Avoid Permission denied errors generated
11951                                 # later by `find`.
11952                                 continue
11953                         try:
11954                                 mymode = os.lstat(x).st_mode
11955                         except OSError:
11956                                 continue
11957                         if stat.S_ISLNK(mymode):
11958                                 # We want to treat it like a directory if it
11959                                 # is a symlink to an existing directory.
11960                                 try:
11961                                         real_mode = os.stat(x).st_mode
11962                                         if stat.S_ISDIR(real_mode):
11963                                                 mymode = real_mode
11964                                 except OSError:
11965                                         pass
11966                         if stat.S_ISDIR(mymode):
11967                                 mycommand = "find '%s' -name '.*' -type d -prune -o -name '._cfg????_*'" % x
11968                         else:
11969                                 mycommand = "find '%s' -maxdepth 1 -name '._cfg????_%s'" % \
11970                                         os.path.split(x.rstrip(os.path.sep))
11971                         mycommand += " ! -name '.*~' ! -iname '.*.bak' -print0"
11972                         a = commands.getstatusoutput(mycommand)
11973                         if a[0] != 0:
11974                                 sys.stderr.write(" %s error scanning '%s': " % (bad("*"), x))
11975                                 sys.stderr.flush()
11976                                 # Show the error message alone, sending stdout to /dev/null.
11977                                 os.system(mycommand + " 1>/dev/null")
11978                         else:
11979                                 files = a[1].split('\0')
11980                                 # split always produces an empty string as the last element
11981                                 if files and not files[-1]:
11982                                         del files[-1]
11983                                 if files:
11984                                         procount += 1
11985                                         print "\n"+colorize("WARN", " * IMPORTANT:"),
11986                                         if stat.S_ISDIR(mymode):
11987                                                  print "%d config files in '%s' need updating." % \
11988                                                         (len(files), x)
11989                                         else:
11990                                                  print "config file '%s' needs updating." % x
11991
11992                 if procount:
11993                         print " "+yellow("*")+" See the "+colorize("INFORM","CONFIGURATION FILES")+ \
11994                                 " section of the " + bold("emerge")
11995                         print " "+yellow("*")+" man page to learn how to update config files."
11996
11997 def checkUpdatedNewsItems(portdb, vardb, NEWS_PATH, UNREAD_PATH, repo_id,
11998         update=False):
11999         """
12000         Examines news items in repodir + '/' + NEWS_PATH and attempts to find unread items
12001         Returns the number of unread (yet relevent) items.
12002         
12003         @param portdb: a portage tree database
12004         @type portdb: pordbapi
12005         @param vardb: an installed package database
12006         @type vardb: vardbapi
12007         @param NEWS_PATH:
12008         @type NEWS_PATH:
12009         @param UNREAD_PATH:
12010         @type UNREAD_PATH:
12011         @param repo_id:
12012         @type repo_id:
12013         @rtype: Integer
12014         @returns:
12015         1.  The number of unread but relevant news items.
12016         
12017         """
12018         from portage.news import NewsManager
12019         manager = NewsManager(portdb, vardb, NEWS_PATH, UNREAD_PATH)
12020         return manager.getUnreadItems( repo_id, update=update )
12021
12022 def insert_category_into_atom(atom, category):
12023         alphanum = re.search(r'\w', atom)
12024         if alphanum:
12025                 ret = atom[:alphanum.start()] + "%s/" % category + \
12026                         atom[alphanum.start():]
12027         else:
12028                 ret = None
12029         return ret
12030
12031 def is_valid_package_atom(x):
12032         if "/" not in x:
12033                 alphanum = re.search(r'\w', x)
12034                 if alphanum:
12035                         x = x[:alphanum.start()] + "cat/" + x[alphanum.start():]
12036         return portage.isvalidatom(x)
12037
12038 def show_blocker_docs_link():
12039         print
12040         print "For more information about " + bad("Blocked Packages") + ", please refer to the following"
12041         print "section of the Gentoo Linux x86 Handbook (architecture is irrelevant):"
12042         print
12043         print "http://www.gentoo.org/doc/en/handbook/handbook-x86.xml?full=1#blocked"
12044         print
12045
12046 def show_mask_docs():
12047         print "For more information, see the MASKED PACKAGES section in the emerge"
12048         print "man page or refer to the Gentoo Handbook."
12049
12050 def action_sync(settings, trees, mtimedb, myopts, myaction):
12051         xterm_titles = "notitles" not in settings.features
12052         emergelog(xterm_titles, " === sync")
12053         myportdir = settings.get("PORTDIR", None)
12054         out = portage.output.EOutput()
12055         if not myportdir:
12056                 sys.stderr.write("!!! PORTDIR is undefined.  Is /etc/make.globals missing?\n")
12057                 sys.exit(1)
12058         if myportdir[-1]=="/":
12059                 myportdir=myportdir[:-1]
12060         try:
12061                 st = os.stat(myportdir)
12062         except OSError:
12063                 st = None
12064         if st is None:
12065                 print ">>>",myportdir,"not found, creating it."
12066                 os.makedirs(myportdir,0755)
12067                 st = os.stat(myportdir)
12068
12069         spawn_kwargs = {}
12070         spawn_kwargs["env"] = settings.environ()
12071         if portage.data.secpass >= 2 and \
12072                 (st.st_uid != os.getuid() and st.st_mode & 0700 or \
12073                 st.st_gid != os.getgid() and st.st_mode & 0070):
12074                 try:
12075                         homedir = pwd.getpwuid(st.st_uid).pw_dir
12076                 except KeyError:
12077                         pass
12078                 else:
12079                         # Drop privileges when syncing, in order to match
12080                         # existing uid/gid settings.
12081                         spawn_kwargs["uid"]    = st.st_uid
12082                         spawn_kwargs["gid"]    = st.st_gid
12083                         spawn_kwargs["groups"] = [st.st_gid]
12084                         spawn_kwargs["env"]["HOME"] = homedir
12085                         umask = 0002
12086                         if not st.st_mode & 0020:
12087                                 umask = umask | 0020
12088                         spawn_kwargs["umask"] = umask
12089
12090         syncuri = settings.get("SYNC", "").strip()
12091         if not syncuri:
12092                 writemsg_level("!!! SYNC is undefined. Is /etc/make.globals missing?\n",
12093                         noiselevel=-1, level=logging.ERROR)
12094                 return 1
12095
12096         vcs_dirs = frozenset([".git", ".svn", "CVS", ".hg"])
12097         vcs_dirs = vcs_dirs.intersection(os.listdir(myportdir))
12098
12099         os.umask(0022)
12100         dosyncuri = syncuri
12101         updatecache_flg = False
12102         if myaction == "metadata":
12103                 print "skipping sync"
12104                 updatecache_flg = True
12105         elif ".git" in vcs_dirs:
12106                 # Update existing git repository, and ignore the syncuri. We are
12107                 # going to trust the user and assume that the user is in the branch
12108                 # that he/she wants updated. We'll let the user manage branches with
12109                 # git directly.
12110                 if portage.process.find_binary("git") is None:
12111                         msg = ["Command not found: git",
12112                         "Type \"emerge dev-util/git\" to enable git support."]
12113                         for l in msg:
12114                                 writemsg_level("!!! %s\n" % l,
12115                                         level=logging.ERROR, noiselevel=-1)
12116                         return 1
12117                 msg = ">>> Starting git pull in %s..." % myportdir
12118                 emergelog(xterm_titles, msg )
12119                 writemsg_level(msg + "\n")
12120                 exitcode = portage.process.spawn_bash("cd %s ; git pull" % \
12121                         (portage._shell_quote(myportdir),), **spawn_kwargs)
12122                 if exitcode != os.EX_OK:
12123                         msg = "!!! git pull error in %s." % myportdir
12124                         emergelog(xterm_titles, msg)
12125                         writemsg_level(msg + "\n", level=logging.ERROR, noiselevel=-1)
12126                         return exitcode
12127                 msg = ">>> Git pull in %s successful" % myportdir
12128                 emergelog(xterm_titles, msg)
12129                 writemsg_level(msg + "\n")
12130                 exitcode = git_sync_timestamps(settings, myportdir)
12131                 if exitcode == os.EX_OK:
12132                         updatecache_flg = True
12133         elif syncuri[:8]=="rsync://":
12134                 for vcs_dir in vcs_dirs:
12135                         writemsg_level(("!!! %s appears to be under revision " + \
12136                                 "control (contains %s).\n!!! Aborting rsync sync.\n") % \
12137                                 (myportdir, vcs_dir), level=logging.ERROR, noiselevel=-1)
12138                         return 1
12139                 if not os.path.exists("/usr/bin/rsync"):
12140                         print "!!! /usr/bin/rsync does not exist, so rsync support is disabled."
12141                         print "!!! Type \"emerge net-misc/rsync\" to enable rsync support."
12142                         sys.exit(1)
12143                 mytimeout=180
12144
12145                 rsync_opts = []
12146                 import shlex, StringIO
12147                 if settings["PORTAGE_RSYNC_OPTS"] == "":
12148                         portage.writemsg("PORTAGE_RSYNC_OPTS empty or unset, using hardcoded defaults\n")
12149                         rsync_opts.extend([
12150                                 "--recursive",    # Recurse directories
12151                                 "--links",        # Consider symlinks
12152                                 "--safe-links",   # Ignore links outside of tree
12153                                 "--perms",        # Preserve permissions
12154                                 "--times",        # Preserive mod times
12155                                 "--compress",     # Compress the data transmitted
12156                                 "--force",        # Force deletion on non-empty dirs
12157                                 "--whole-file",   # Don't do block transfers, only entire files
12158                                 "--delete",       # Delete files that aren't in the master tree
12159                                 "--stats",        # Show final statistics about what was transfered
12160                                 "--timeout="+str(mytimeout), # IO timeout if not done in X seconds
12161                                 "--exclude=/distfiles",   # Exclude distfiles from consideration
12162                                 "--exclude=/local",       # Exclude local     from consideration
12163                                 "--exclude=/packages",    # Exclude packages  from consideration
12164                         ])
12165
12166                 else:
12167                         # The below validation is not needed when using the above hardcoded
12168                         # defaults.
12169
12170                         portage.writemsg("Using PORTAGE_RSYNC_OPTS instead of hardcoded defaults\n", 1)
12171                         lexer = shlex.shlex(StringIO.StringIO(
12172                                 settings.get("PORTAGE_RSYNC_OPTS","")), posix=True)
12173                         lexer.whitespace_split = True
12174                         rsync_opts.extend(lexer)
12175                         del lexer
12176
12177                         for opt in ("--recursive", "--times"):
12178                                 if opt not in rsync_opts:
12179                                         portage.writemsg(yellow("WARNING:") + " adding required option " + \
12180                                         "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
12181                                         rsync_opts.append(opt)
12182         
12183                         for exclude in ("distfiles", "local", "packages"):
12184                                 opt = "--exclude=/%s" % exclude
12185                                 if opt not in rsync_opts:
12186                                         portage.writemsg(yellow("WARNING:") + \
12187                                         " adding required option %s not included in "  % opt + \
12188                                         "PORTAGE_RSYNC_OPTS (can be overridden with --exclude='!')\n")
12189                                         rsync_opts.append(opt)
12190         
12191                         if syncuri.rstrip("/").endswith(".gentoo.org/gentoo-portage"):
12192                                 def rsync_opt_startswith(opt_prefix):
12193                                         for x in rsync_opts:
12194                                                 if x.startswith(opt_prefix):
12195                                                         return True
12196                                         return False
12197
12198                                 if not rsync_opt_startswith("--timeout="):
12199                                         rsync_opts.append("--timeout=%d" % mytimeout)
12200
12201                                 for opt in ("--compress", "--whole-file"):
12202                                         if opt not in rsync_opts:
12203                                                 portage.writemsg(yellow("WARNING:") + " adding required option " + \
12204                                                 "%s not included in PORTAGE_RSYNC_OPTS\n" % opt)
12205                                                 rsync_opts.append(opt)
12206
12207                 if "--quiet" in myopts:
12208                         rsync_opts.append("--quiet")    # Shut up a lot
12209                 else:
12210                         rsync_opts.append("--verbose")  # Print filelist
12211
12212                 if "--verbose" in myopts:
12213                         rsync_opts.append("--progress")  # Progress meter for each file
12214
12215                 if "--debug" in myopts:
12216                         rsync_opts.append("--checksum") # Force checksum on all files
12217
12218                 # Real local timestamp file.
12219                 servertimestampfile = os.path.join(
12220                         myportdir, "metadata", "timestamp.chk")
12221
12222                 content = portage.util.grabfile(servertimestampfile)
12223                 mytimestamp = 0
12224                 if content:
12225                         try:
12226                                 mytimestamp = time.mktime(time.strptime(content[0],
12227                                         "%a, %d %b %Y %H:%M:%S +0000"))
12228                         except (OverflowError, ValueError):
12229                                 pass
12230                 del content
12231
12232                 try:
12233                         rsync_initial_timeout = \
12234                                 int(settings.get("PORTAGE_RSYNC_INITIAL_TIMEOUT", "15"))
12235                 except ValueError:
12236                         rsync_initial_timeout = 15
12237
12238                 try:
12239                         maxretries=int(settings["PORTAGE_RSYNC_RETRIES"])
12240                 except SystemExit, e:
12241                         raise # Needed else can't exit
12242                 except:
12243                         maxretries=3 #default number of retries
12244
12245                 retries=0
12246                 user_name, hostname, port = re.split(
12247                         "rsync://([^:/]+@)?([^:/]*)(:[0-9]+)?", syncuri, maxsplit=3)[1:4]
12248                 if port is None:
12249                         port=""
12250                 if user_name is None:
12251                         user_name=""
12252                 updatecache_flg=True
12253                 all_rsync_opts = set(rsync_opts)
12254                 lexer = shlex.shlex(StringIO.StringIO(
12255                         settings.get("PORTAGE_RSYNC_EXTRA_OPTS","")), posix=True)
12256                 lexer.whitespace_split = True
12257                 extra_rsync_opts = list(lexer)
12258                 del lexer
12259                 all_rsync_opts.update(extra_rsync_opts)
12260                 family = socket.AF_INET
12261                 if "-4" in all_rsync_opts or "--ipv4" in all_rsync_opts:
12262                         family = socket.AF_INET
12263                 elif socket.has_ipv6 and \
12264                         ("-6" in all_rsync_opts or "--ipv6" in all_rsync_opts):
12265                         family = socket.AF_INET6
12266                 ips=[]
12267                 SERVER_OUT_OF_DATE = -1
12268                 EXCEEDED_MAX_RETRIES = -2
12269                 while (1):
12270                         if ips:
12271                                 del ips[0]
12272                         if ips==[]:
12273                                 try:
12274                                         for addrinfo in socket.getaddrinfo(
12275                                                 hostname, None, family, socket.SOCK_STREAM):
12276                                                 if socket.has_ipv6 and addrinfo[0] == socket.AF_INET6:
12277                                                         # IPv6 addresses need to be enclosed in square brackets
12278                                                         ips.append("[%s]" % addrinfo[4][0])
12279                                                 else:
12280                                                         ips.append(addrinfo[4][0])
12281                                         from random import shuffle
12282                                         shuffle(ips)
12283                                 except SystemExit, e:
12284                                         raise # Needed else can't exit
12285                                 except Exception, e:
12286                                         print "Notice:",str(e)
12287                                         dosyncuri=syncuri
12288
12289                         if ips:
12290                                 try:
12291                                         dosyncuri = syncuri.replace(
12292                                                 "//" + user_name + hostname + port + "/",
12293                                                 "//" + user_name + ips[0] + port + "/", 1)
12294                                 except SystemExit, e:
12295                                         raise # Needed else can't exit
12296                                 except Exception, e:
12297                                         print "Notice:",str(e)
12298                                         dosyncuri=syncuri
12299
12300                         if (retries==0):
12301                                 if "--ask" in myopts:
12302                                         if userquery("Do you want to sync your Portage tree with the mirror at\n" + blue(dosyncuri) + bold("?"))=="No":
12303                                                 print
12304                                                 print "Quitting."
12305                                                 print
12306                                                 sys.exit(0)
12307                                 emergelog(xterm_titles, ">>> Starting rsync with " + dosyncuri)
12308                                 if "--quiet" not in myopts:
12309                                         print ">>> Starting rsync with "+dosyncuri+"..."
12310                         else:
12311                                 emergelog(xterm_titles,
12312                                         ">>> Starting retry %d of %d with %s" % \
12313                                                 (retries,maxretries,dosyncuri))
12314                                 print "\n\n>>> Starting retry %d of %d with %s" % (retries,maxretries,dosyncuri)
12315
12316                         if mytimestamp != 0 and "--quiet" not in myopts:
12317                                 print ">>> Checking server timestamp ..."
12318
12319                         rsynccommand = ["/usr/bin/rsync"] + rsync_opts + extra_rsync_opts
12320
12321                         if "--debug" in myopts:
12322                                 print rsynccommand
12323
12324                         exitcode = os.EX_OK
12325                         servertimestamp = 0
12326                         # Even if there's no timestamp available locally, fetch the
12327                         # timestamp anyway as an initial probe to verify that the server is
12328                         # responsive.  This protects us from hanging indefinitely on a
12329                         # connection attempt to an unresponsive server which rsync's
12330                         # --timeout option does not prevent.
12331                         if True:
12332                                 # Temporary file for remote server timestamp comparison.
12333                                 from tempfile import mkstemp
12334                                 fd, tmpservertimestampfile = mkstemp()
12335                                 os.close(fd)
12336                                 mycommand = rsynccommand[:]
12337                                 mycommand.append(dosyncuri.rstrip("/") + \
12338                                         "/metadata/timestamp.chk")
12339                                 mycommand.append(tmpservertimestampfile)
12340                                 content = None
12341                                 mypids = []
12342                                 try:
12343                                         def timeout_handler(signum, frame):
12344                                                 raise portage.exception.PortageException("timed out")
12345                                         signal.signal(signal.SIGALRM, timeout_handler)
12346                                         # Timeout here in case the server is unresponsive.  The
12347                                         # --timeout rsync option doesn't apply to the initial
12348                                         # connection attempt.
12349                                         if rsync_initial_timeout:
12350                                                 signal.alarm(rsync_initial_timeout)
12351                                         try:
12352                                                 mypids.extend(portage.process.spawn(
12353                                                         mycommand, env=settings.environ(), returnpid=True))
12354                                                 exitcode = os.waitpid(mypids[0], 0)[1]
12355                                                 content = portage.grabfile(tmpservertimestampfile)
12356                                         finally:
12357                                                 if rsync_initial_timeout:
12358                                                         signal.alarm(0)
12359                                                 try:
12360                                                         os.unlink(tmpservertimestampfile)
12361                                                 except OSError:
12362                                                         pass
12363                                 except portage.exception.PortageException, e:
12364                                         # timed out
12365                                         print e
12366                                         del e
12367                                         if mypids and os.waitpid(mypids[0], os.WNOHANG) == (0,0):
12368                                                 os.kill(mypids[0], signal.SIGTERM)
12369                                                 os.waitpid(mypids[0], 0)
12370                                         # This is the same code rsync uses for timeout.
12371                                         exitcode = 30
12372                                 else:
12373                                         if exitcode != os.EX_OK:
12374                                                 if exitcode & 0xff:
12375                                                         exitcode = (exitcode & 0xff) << 8
12376                                                 else:
12377                                                         exitcode = exitcode >> 8
12378                                 if mypids:
12379                                         portage.process.spawned_pids.remove(mypids[0])
12380                                 if content:
12381                                         try:
12382                                                 servertimestamp = time.mktime(time.strptime(
12383                                                         content[0], "%a, %d %b %Y %H:%M:%S +0000"))
12384                                         except (OverflowError, ValueError):
12385                                                 pass
12386                                 del mycommand, mypids, content
12387                         if exitcode == os.EX_OK:
12388                                 if (servertimestamp != 0) and (servertimestamp == mytimestamp):
12389                                         emergelog(xterm_titles,
12390                                                 ">>> Cancelling sync -- Already current.")
12391                                         print
12392                                         print ">>>"
12393                                         print ">>> Timestamps on the server and in the local repository are the same."
12394                                         print ">>> Cancelling all further sync action. You are already up to date."
12395                                         print ">>>"
12396                                         print ">>> In order to force sync, remove '%s'." % servertimestampfile
12397                                         print ">>>"
12398                                         print
12399                                         sys.exit(0)
12400                                 elif (servertimestamp != 0) and (servertimestamp < mytimestamp):
12401                                         emergelog(xterm_titles,
12402                                                 ">>> Server out of date: %s" % dosyncuri)
12403                                         print
12404                                         print ">>>"
12405                                         print ">>> SERVER OUT OF DATE: %s" % dosyncuri
12406                                         print ">>>"
12407                                         print ">>> In order to force sync, remove '%s'." % servertimestampfile
12408                                         print ">>>"
12409                                         print
12410                                         exitcode = SERVER_OUT_OF_DATE
12411                                 elif (servertimestamp == 0) or (servertimestamp > mytimestamp):
12412                                         # actual sync
12413                                         mycommand = rsynccommand + [dosyncuri+"/", myportdir]
12414                                         exitcode = portage.process.spawn(mycommand, **spawn_kwargs)
12415                                         if exitcode in [0,1,3,4,11,14,20,21]:
12416                                                 break
12417                         elif exitcode in [1,3,4,11,14,20,21]:
12418                                 break
12419                         else:
12420                                 # Code 2 indicates protocol incompatibility, which is expected
12421                                 # for servers with protocol < 29 that don't support
12422                                 # --prune-empty-directories.  Retry for a server that supports
12423                                 # at least rsync protocol version 29 (>=rsync-2.6.4).
12424                                 pass
12425
12426                         retries=retries+1
12427
12428                         if retries<=maxretries:
12429                                 print ">>> Retrying..."
12430                                 time.sleep(11)
12431                         else:
12432                                 # over retries
12433                                 # exit loop
12434                                 updatecache_flg=False
12435                                 exitcode = EXCEEDED_MAX_RETRIES
12436                                 break
12437
12438                 if (exitcode==0):
12439                         emergelog(xterm_titles, "=== Sync completed with %s" % dosyncuri)
12440                 elif exitcode == SERVER_OUT_OF_DATE:
12441                         sys.exit(1)
12442                 elif exitcode == EXCEEDED_MAX_RETRIES:
12443                         sys.stderr.write(
12444                                 ">>> Exceeded PORTAGE_RSYNC_RETRIES: %s\n" % maxretries)
12445                         sys.exit(1)
12446                 elif (exitcode>0):
12447                         msg = []
12448                         if exitcode==1:
12449                                 msg.append("Rsync has reported that there is a syntax error. Please ensure")
12450                                 msg.append("that your SYNC statement is proper.")
12451                                 msg.append("SYNC=" + settings["SYNC"])
12452                         elif exitcode==11:
12453                                 msg.append("Rsync has reported that there is a File IO error. Normally")
12454                                 msg.append("this means your disk is full, but can be caused by corruption")
12455                                 msg.append("on the filesystem that contains PORTDIR. Please investigate")
12456                                 msg.append("and try again after the problem has been fixed.")
12457                                 msg.append("PORTDIR=" + settings["PORTDIR"])
12458                         elif exitcode==20:
12459                                 msg.append("Rsync was killed before it finished.")
12460                         else:
12461                                 msg.append("Rsync has not successfully finished. It is recommended that you keep")
12462                                 msg.append("trying or that you use the 'emerge-webrsync' option if you are unable")
12463                                 msg.append("to use rsync due to firewall or other restrictions. This should be a")
12464                                 msg.append("temporary problem unless complications exist with your network")
12465                                 msg.append("(and possibly your system's filesystem) configuration.")
12466                         for line in msg:
12467                                 out.eerror(line)
12468                         sys.exit(exitcode)
12469         elif syncuri[:6]=="cvs://":
12470                 if not os.path.exists("/usr/bin/cvs"):
12471                         print "!!! /usr/bin/cvs does not exist, so CVS support is disabled."
12472                         print "!!! Type \"emerge dev-util/cvs\" to enable CVS support."
12473                         sys.exit(1)
12474                 cvsroot=syncuri[6:]
12475                 cvsdir=os.path.dirname(myportdir)
12476                 if not os.path.exists(myportdir+"/CVS"):
12477                         #initial checkout
12478                         print ">>> Starting initial cvs checkout with "+syncuri+"..."
12479                         if os.path.exists(cvsdir+"/gentoo-x86"):
12480                                 print "!!! existing",cvsdir+"/gentoo-x86 directory; exiting."
12481                                 sys.exit(1)
12482                         try:
12483                                 os.rmdir(myportdir)
12484                         except OSError, e:
12485                                 if e.errno != errno.ENOENT:
12486                                         sys.stderr.write(
12487                                                 "!!! existing '%s' directory; exiting.\n" % myportdir)
12488                                         sys.exit(1)
12489                                 del e
12490                         if portage.spawn("cd "+cvsdir+"; cvs -z0 -d "+cvsroot+" co -P gentoo-x86",settings,free=1):
12491                                 print "!!! cvs checkout error; exiting."
12492                                 sys.exit(1)
12493                         os.rename(os.path.join(cvsdir, "gentoo-x86"), myportdir)
12494                 else:
12495                         #cvs update
12496                         print ">>> Starting cvs update with "+syncuri+"..."
12497                         retval = portage.process.spawn_bash(
12498                                 "cd %s; cvs -z0 -q update -dP" % \
12499                                 (portage._shell_quote(myportdir),), **spawn_kwargs)
12500                         if retval != os.EX_OK:
12501                                 sys.exit(retval)
12502                 dosyncuri = syncuri
12503         else:
12504                 writemsg_level("!!! Unrecognized protocol: SYNC='%s'\n" % (syncuri,),
12505                         noiselevel=-1, level=logging.ERROR)
12506                 return 1
12507
12508         if updatecache_flg and  \
12509                 myaction != "metadata" and \
12510                 "metadata-transfer" not in settings.features:
12511                 updatecache_flg = False
12512
12513         # Reload the whole config from scratch.
12514         settings, trees, mtimedb = load_emerge_config(trees=trees)
12515         root_config = trees[settings["ROOT"]]["root_config"]
12516         portdb = trees[settings["ROOT"]]["porttree"].dbapi
12517
12518         if os.path.exists(myportdir+"/metadata/cache") and updatecache_flg:
12519                 action_metadata(settings, portdb, myopts)
12520
12521         if portage._global_updates(trees, mtimedb["updates"]):
12522                 mtimedb.commit()
12523                 # Reload the whole config from scratch.
12524                 settings, trees, mtimedb = load_emerge_config(trees=trees)
12525                 portdb = trees[settings["ROOT"]]["porttree"].dbapi
12526                 root_config = trees[settings["ROOT"]]["root_config"]
12527
12528         mybestpv = portdb.xmatch("bestmatch-visible",
12529                 portage.const.PORTAGE_PACKAGE_ATOM)
12530         mypvs = portage.best(
12531                 trees[settings["ROOT"]]["vartree"].dbapi.match(
12532                 portage.const.PORTAGE_PACKAGE_ATOM))
12533
12534         chk_updated_cfg_files("/", settings.get("CONFIG_PROTECT","").split())
12535
12536         if myaction != "metadata":
12537                 if os.access(portage.USER_CONFIG_PATH + "/bin/post_sync", os.X_OK):
12538                         retval = portage.process.spawn(
12539                                 [os.path.join(portage.USER_CONFIG_PATH, "bin", "post_sync"),
12540                                 dosyncuri], env=settings.environ())
12541                         if retval != os.EX_OK:
12542                                 print red(" * ")+bold("spawn failed of "+ portage.USER_CONFIG_PATH + "/bin/post_sync")
12543
12544         if(mybestpv != mypvs) and not "--quiet" in myopts:
12545                 print
12546                 print red(" * ")+bold("An update to portage is available.")+" It is _highly_ recommended"
12547                 print red(" * ")+"that you update portage now, before any other packages are updated."
12548                 print
12549                 print red(" * ")+"To update portage, run 'emerge portage' now."
12550                 print
12551         
12552         display_news_notification(root_config, myopts)
12553         return os.EX_OK
12554
12555 def git_sync_timestamps(settings, portdir):
12556         """
12557         Since git doesn't preserve timestamps, synchronize timestamps between
12558         entries and ebuilds/eclasses. Assume the cache has the correct timestamp
12559         for a given file as long as the file in the working tree is not modified
12560         (relative to HEAD).
12561         """
12562         cache_dir = os.path.join(portdir, "metadata", "cache")
12563         if not os.path.isdir(cache_dir):
12564                 return os.EX_OK
12565         writemsg_level(">>> Synchronizing timestamps...\n")
12566
12567         from portage.cache.cache_errors import CacheError
12568         try:
12569                 cache_db = settings.load_best_module("portdbapi.metadbmodule")(
12570                         portdir, "metadata/cache", portage.auxdbkeys[:], readonly=True)
12571         except CacheError, e:
12572                 writemsg_level("!!! Unable to instantiate cache: %s\n" % (e,),
12573                         level=logging.ERROR, noiselevel=-1)
12574                 return 1
12575
12576         ec_dir = os.path.join(portdir, "eclass")
12577         try:
12578                 ec_names = set(f[:-7] for f in os.listdir(ec_dir) \
12579                         if f.endswith(".eclass"))
12580         except OSError, e:
12581                 writemsg_level("!!! Unable to list eclasses: %s\n" % (e,),
12582                         level=logging.ERROR, noiselevel=-1)
12583                 return 1
12584
12585         args = [portage.const.BASH_BINARY, "-c",
12586                 "cd %s && git diff-index --name-only --diff-filter=M HEAD" % \
12587                 portage._shell_quote(portdir)]
12588         import subprocess
12589         proc = subprocess.Popen(args, stdout=subprocess.PIPE)
12590         modified_files = set(l.rstrip("\n") for l in proc.stdout)
12591         rval = proc.wait()
12592         if rval != os.EX_OK:
12593                 return rval
12594
12595         modified_eclasses = set(ec for ec in ec_names \
12596                 if os.path.join("eclass", ec + ".eclass") in modified_files)
12597
12598         updated_ec_mtimes = {}
12599
12600         for cpv in cache_db:
12601                 cpv_split = portage.catpkgsplit(cpv)
12602                 if cpv_split is None:
12603                         writemsg_level("!!! Invalid cache entry: %s\n" % (cpv,),
12604                                 level=logging.ERROR, noiselevel=-1)
12605                         continue
12606
12607                 cat, pn, ver, rev = cpv_split
12608                 cat, pf = portage.catsplit(cpv)
12609                 relative_eb_path = os.path.join(cat, pn, pf + ".ebuild")
12610                 if relative_eb_path in modified_files:
12611                         continue
12612
12613                 try:
12614                         cache_entry = cache_db[cpv]
12615                         eb_mtime = cache_entry.get("_mtime_")
12616                         ec_mtimes = cache_entry.get("_eclasses_")
12617                 except KeyError:
12618                         writemsg_level("!!! Missing cache entry: %s\n" % (cpv,),
12619                                 level=logging.ERROR, noiselevel=-1)
12620                         continue
12621                 except CacheError, e:
12622                         writemsg_level("!!! Unable to access cache entry: %s %s\n" % \
12623                                 (cpv, e), level=logging.ERROR, noiselevel=-1)
12624                         continue
12625
12626                 if eb_mtime is None:
12627                         writemsg_level("!!! Missing ebuild mtime: %s\n" % (cpv,),
12628                                 level=logging.ERROR, noiselevel=-1)
12629                         continue
12630
12631                 try:
12632                         eb_mtime = long(eb_mtime)
12633                 except ValueError:
12634                         writemsg_level("!!! Invalid ebuild mtime: %s %s\n" % \
12635                                 (cpv, eb_mtime), level=logging.ERROR, noiselevel=-1)
12636                         continue
12637
12638                 if ec_mtimes is None:
12639                         writemsg_level("!!! Missing eclass mtimes: %s\n" % (cpv,),
12640                                 level=logging.ERROR, noiselevel=-1)
12641                         continue
12642
12643                 if modified_eclasses.intersection(ec_mtimes):
12644                         continue
12645
12646                 missing_eclasses = set(ec_mtimes).difference(ec_names)
12647                 if missing_eclasses:
12648                         writemsg_level("!!! Non-existent eclass(es): %s %s\n" % \
12649                                 (cpv, sorted(missing_eclasses)), level=logging.ERROR,
12650                                 noiselevel=-1)
12651                         continue
12652
12653                 eb_path = os.path.join(portdir, relative_eb_path)
12654                 try:
12655                         current_eb_mtime = os.stat(eb_path)
12656                 except OSError:
12657                         writemsg_level("!!! Missing ebuild: %s\n" % \
12658                                 (cpv,), level=logging.ERROR, noiselevel=-1)
12659                         continue
12660
12661                 inconsistent = False
12662                 for ec, (ec_path, ec_mtime) in ec_mtimes.iteritems():
12663                         updated_mtime = updated_ec_mtimes.get(ec)
12664                         if updated_mtime is not None and updated_mtime != ec_mtime:
12665                                 writemsg_level("!!! Inconsistent eclass mtime: %s %s\n" % \
12666                                         (cpv, ec), level=logging.ERROR, noiselevel=-1)
12667                                 inconsistent = True
12668                                 break
12669
12670                 if inconsistent:
12671                         continue
12672
12673                 if current_eb_mtime != eb_mtime:
12674                         os.utime(eb_path, (eb_mtime, eb_mtime))
12675
12676                 for ec, (ec_path, ec_mtime) in ec_mtimes.iteritems():
12677                         if ec in updated_ec_mtimes:
12678                                 continue
12679                         ec_path = os.path.join(ec_dir, ec + ".eclass")
12680                         current_mtime = long(os.stat(ec_path).st_mtime)
12681                         if current_mtime != ec_mtime:
12682                                 os.utime(ec_path, (ec_mtime, ec_mtime))
12683                         updated_ec_mtimes[ec] = ec_mtime
12684
12685         return os.EX_OK
12686
12687 def action_metadata(settings, portdb, myopts):
12688         portage.writemsg_stdout("\n>>> Updating Portage cache:      ")
12689         old_umask = os.umask(0002)
12690         cachedir = os.path.normpath(settings.depcachedir)
12691         if cachedir in ["/",    "/bin", "/dev",  "/etc",  "/home",
12692                                         "/lib", "/opt", "/proc", "/root", "/sbin",
12693                                         "/sys", "/tmp", "/usr",  "/var"]:
12694                 print >> sys.stderr, "!!! PORTAGE_DEPCACHEDIR IS SET TO A PRIMARY " + \
12695                         "ROOT DIRECTORY ON YOUR SYSTEM."
12696                 print >> sys.stderr, \
12697                         "!!! This is ALMOST CERTAINLY NOT what you want: '%s'" % cachedir
12698                 sys.exit(73)
12699         if not os.path.exists(cachedir):
12700                 os.mkdir(cachedir)
12701
12702         ec = portage.eclass_cache.cache(portdb.porttree_root)
12703         myportdir = os.path.realpath(settings["PORTDIR"])
12704         cm = settings.load_best_module("portdbapi.metadbmodule")(
12705                 myportdir, "metadata/cache", portage.auxdbkeys[:], readonly=True)
12706
12707         from portage.cache import util
12708
12709         class percentage_noise_maker(util.quiet_mirroring):
12710                 def __init__(self, dbapi):
12711                         self.dbapi = dbapi
12712                         self.cp_all = dbapi.cp_all()
12713                         l = len(self.cp_all)
12714                         self.call_update_min = 100000000
12715                         self.min_cp_all = l/100.0
12716                         self.count = 1
12717                         self.pstr = ''
12718
12719                 def __iter__(self):
12720                         for x in self.cp_all:
12721                                 self.count += 1
12722                                 if self.count > self.min_cp_all:
12723                                         self.call_update_min = 0
12724                                         self.count = 0
12725                                 for y in self.dbapi.cp_list(x):
12726                                         yield y
12727                         self.call_update_mine = 0
12728
12729                 def update(self, *arg):
12730                         try:                            self.pstr = int(self.pstr) + 1
12731                         except ValueError:      self.pstr = 1
12732                         sys.stdout.write("%s%i%%" % \
12733                                 ("\b" * (len(str(self.pstr))+1), self.pstr))
12734                         sys.stdout.flush()
12735                         self.call_update_min = 10000000
12736
12737                 def finish(self, *arg):
12738                         sys.stdout.write("\b\b\b\b100%\n")
12739                         sys.stdout.flush()
12740
12741         if "--quiet" in myopts:
12742                 def quicky_cpv_generator(cp_all_list):
12743                         for x in cp_all_list:
12744                                 for y in portdb.cp_list(x):
12745                                         yield y
12746                 source = quicky_cpv_generator(portdb.cp_all())
12747                 noise_maker = portage.cache.util.quiet_mirroring()
12748         else:
12749                 noise_maker = source = percentage_noise_maker(portdb)
12750         portage.cache.util.mirror_cache(source, cm, portdb.auxdb[myportdir],
12751                 eclass_cache=ec, verbose_instance=noise_maker)
12752
12753         sys.stdout.flush()
12754         os.umask(old_umask)
12755
12756 def action_regen(settings, portdb, max_jobs, max_load):
12757         xterm_titles = "notitles" not in settings.features
12758         emergelog(xterm_titles, " === regen")
12759         #regenerate cache entries
12760         portage.writemsg_stdout("Regenerating cache entries...\n")
12761         try:
12762                 os.close(sys.stdin.fileno())
12763         except SystemExit, e:
12764                 raise # Needed else can't exit
12765         except:
12766                 pass
12767         sys.stdout.flush()
12768
12769         regen = MetadataRegen(portdb, max_jobs=max_jobs, max_load=max_load)
12770         regen.run()
12771
12772         portage.writemsg_stdout("done!\n")
12773
12774 def action_config(settings, trees, myopts, myfiles):
12775         if len(myfiles) != 1:
12776                 print red("!!! config can only take a single package atom at this time\n")
12777                 sys.exit(1)
12778         if not is_valid_package_atom(myfiles[0]):
12779                 portage.writemsg("!!! '%s' is not a valid package atom.\n" % myfiles[0],
12780                         noiselevel=-1)
12781                 portage.writemsg("!!! Please check ebuild(5) for full details.\n")
12782                 portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
12783                 sys.exit(1)
12784         print
12785         try:
12786                 pkgs = trees[settings["ROOT"]]["vartree"].dbapi.match(myfiles[0])
12787         except portage.exception.AmbiguousPackageName, e:
12788                 # Multiple matches thrown from cpv_expand
12789                 pkgs = e.args[0]
12790         if len(pkgs) == 0:
12791                 print "No packages found.\n"
12792                 sys.exit(0)
12793         elif len(pkgs) > 1:
12794                 if "--ask" in myopts:
12795                         options = []
12796                         print "Please select a package to configure:"
12797                         idx = 0
12798                         for pkg in pkgs:
12799                                 idx += 1
12800                                 options.append(str(idx))
12801                                 print options[-1]+") "+pkg
12802                         print "X) Cancel"
12803                         options.append("X")
12804                         idx = userquery("Selection?", options)
12805                         if idx == "X":
12806                                 sys.exit(0)
12807                         pkg = pkgs[int(idx)-1]
12808                 else:
12809                         print "The following packages available:"
12810                         for pkg in pkgs:
12811                                 print "* "+pkg
12812                         print "\nPlease use a specific atom or the --ask option."
12813                         sys.exit(1)
12814         else:
12815                 pkg = pkgs[0]
12816
12817         print
12818         if "--ask" in myopts:
12819                 if userquery("Ready to configure "+pkg+"?") == "No":
12820                         sys.exit(0)
12821         else:
12822                 print "Configuring pkg..."
12823         print
12824         ebuildpath = trees[settings["ROOT"]]["vartree"].dbapi.findname(pkg)
12825         mysettings = portage.config(clone=settings)
12826         vardb = trees[mysettings["ROOT"]]["vartree"].dbapi
12827         debug = mysettings.get("PORTAGE_DEBUG") == "1"
12828         retval = portage.doebuild(ebuildpath, "config", mysettings["ROOT"],
12829                 mysettings,
12830                 debug=(settings.get("PORTAGE_DEBUG", "") == 1), cleanup=True,
12831                 mydbapi=trees[settings["ROOT"]]["vartree"].dbapi, tree="vartree")
12832         if retval == os.EX_OK:
12833                 portage.doebuild(ebuildpath, "clean", mysettings["ROOT"],
12834                         mysettings, debug=debug, mydbapi=vardb, tree="vartree")
12835         print
12836
12837 def action_info(settings, trees, myopts, myfiles):
12838         print getportageversion(settings["PORTDIR"], settings["ROOT"],
12839                 settings.profile_path, settings["CHOST"],
12840                 trees[settings["ROOT"]]["vartree"].dbapi)
12841         header_width = 65
12842         header_title = "System Settings"
12843         if myfiles:
12844                 print header_width * "="
12845                 print header_title.rjust(int(header_width/2 + len(header_title)/2))
12846         print header_width * "="
12847         print "System uname: "+platform.platform(aliased=1)
12848
12849         lastSync = portage.grabfile(os.path.join(
12850                 settings["PORTDIR"], "metadata", "timestamp.chk"))
12851         print "Timestamp of tree:",
12852         if lastSync:
12853                 print lastSync[0]
12854         else:
12855                 print "Unknown"
12856
12857         output=commands.getstatusoutput("distcc --version")
12858         if not output[0]:
12859                 print str(output[1].split("\n",1)[0]),
12860                 if "distcc" in settings.features:
12861                         print "[enabled]"
12862                 else:
12863                         print "[disabled]"
12864
12865         output=commands.getstatusoutput("ccache -V")
12866         if not output[0]:
12867                 print str(output[1].split("\n",1)[0]),
12868                 if "ccache" in settings.features:
12869                         print "[enabled]"
12870                 else:
12871                         print "[disabled]"
12872
12873         myvars  = ["sys-devel/autoconf", "sys-devel/automake", "virtual/os-headers",
12874                    "sys-devel/binutils", "sys-devel/libtool",  "dev-lang/python"]
12875         myvars += portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_pkgs")
12876         myvars  = portage.util.unique_array(myvars)
12877         myvars.sort()
12878
12879         for x in myvars:
12880                 if portage.isvalidatom(x):
12881                         pkg_matches = trees["/"]["vartree"].dbapi.match(x)
12882                         pkg_matches = [portage.catpkgsplit(cpv)[1:] for cpv in pkg_matches]
12883                         pkg_matches.sort(portage.pkgcmp)
12884                         pkgs = []
12885                         for pn, ver, rev in pkg_matches:
12886                                 if rev != "r0":
12887                                         pkgs.append(ver + "-" + rev)
12888                                 else:
12889                                         pkgs.append(ver)
12890                         if pkgs:
12891                                 pkgs = ", ".join(pkgs)
12892                                 print "%-20s %s" % (x+":", pkgs)
12893                 else:
12894                         print "%-20s %s" % (x+":", "[NOT VALID]")
12895
12896         libtool_vers = ",".join(trees["/"]["vartree"].dbapi.match("sys-devel/libtool"))
12897
12898         if "--verbose" in myopts:
12899                 myvars=settings.keys()
12900         else:
12901                 myvars = ['GENTOO_MIRRORS', 'CONFIG_PROTECT', 'CONFIG_PROTECT_MASK',
12902                           'PORTDIR', 'DISTDIR', 'PKGDIR', 'PORTAGE_TMPDIR',
12903                           'PORTDIR_OVERLAY', 'USE', 'CHOST', 'CFLAGS', 'CXXFLAGS',
12904                           'ACCEPT_KEYWORDS', 'SYNC', 'FEATURES', 'EMERGE_DEFAULT_OPTS']
12905
12906                 myvars.extend(portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_vars"))
12907
12908         myvars = portage.util.unique_array(myvars)
12909         unset_vars = []
12910         myvars.sort()
12911         for x in myvars:
12912                 if x in settings:
12913                         if x != "USE":
12914                                 print '%s="%s"' % (x, settings[x])
12915                         else:
12916                                 use = set(settings["USE"].split())
12917                                 use_expand = settings["USE_EXPAND"].split()
12918                                 use_expand.sort()
12919                                 for varname in use_expand:
12920                                         flag_prefix = varname.lower() + "_"
12921                                         for f in list(use):
12922                                                 if f.startswith(flag_prefix):
12923                                                         use.remove(f)
12924                                 use = list(use)
12925                                 use.sort()
12926                                 print 'USE="%s"' % " ".join(use),
12927                                 for varname in use_expand:
12928                                         myval = settings.get(varname)
12929                                         if myval:
12930                                                 print '%s="%s"' % (varname, myval),
12931                                 print
12932                 else:
12933                         unset_vars.append(x)
12934         if unset_vars:
12935                 print "Unset:  "+", ".join(unset_vars)
12936         print
12937
12938         if "--debug" in myopts:
12939                 for x in dir(portage):
12940                         module = getattr(portage, x)
12941                         if "cvs_id_string" in dir(module):
12942                                 print "%s: %s" % (str(x), str(module.cvs_id_string))
12943
12944         # See if we can find any packages installed matching the strings
12945         # passed on the command line
12946         mypkgs = []
12947         vardb = trees[settings["ROOT"]]["vartree"].dbapi
12948         portdb = trees[settings["ROOT"]]["porttree"].dbapi
12949         for x in myfiles:
12950                 mypkgs.extend(vardb.match(x))
12951
12952         # If some packages were found...
12953         if mypkgs:
12954                 # Get our global settings (we only print stuff if it varies from
12955                 # the current config)
12956                 mydesiredvars = [ 'CHOST', 'CFLAGS', 'CXXFLAGS', 'LDFLAGS' ]
12957                 auxkeys = mydesiredvars + [ "USE", "IUSE"]
12958                 global_vals = {}
12959                 pkgsettings = portage.config(clone=settings)
12960
12961                 for myvar in mydesiredvars:
12962                         global_vals[myvar] = set(settings.get(myvar, "").split())
12963
12964                 # Loop through each package
12965                 # Only print settings if they differ from global settings
12966                 header_title = "Package Settings"
12967                 print header_width * "="
12968                 print header_title.rjust(int(header_width/2 + len(header_title)/2))
12969                 print header_width * "="
12970                 from portage.output import EOutput
12971                 out = EOutput()
12972                 for pkg in mypkgs:
12973                         # Get all package specific variables
12974                         auxvalues = vardb.aux_get(pkg, auxkeys)
12975                         valuesmap = {}
12976                         for i in xrange(len(auxkeys)):
12977                                 valuesmap[auxkeys[i]] = set(auxvalues[i].split())
12978                         diff_values = {}
12979                         for myvar in mydesiredvars:
12980                                 # If the package variable doesn't match the
12981                                 # current global variable, something has changed
12982                                 # so set diff_found so we know to print
12983                                 if valuesmap[myvar] != global_vals[myvar]:
12984                                         diff_values[myvar] = valuesmap[myvar]
12985                         valuesmap["IUSE"] = set(filter_iuse_defaults(valuesmap["IUSE"]))
12986                         valuesmap["USE"] = valuesmap["USE"].intersection(valuesmap["IUSE"])
12987                         pkgsettings.reset()
12988                         # If a matching ebuild is no longer available in the tree, maybe it
12989                         # would make sense to compare against the flags for the best
12990                         # available version with the same slot?
12991                         mydb = None
12992                         if portdb.cpv_exists(pkg):
12993                                 mydb = portdb
12994                         pkgsettings.setcpv(pkg, mydb=mydb)
12995                         if valuesmap["IUSE"].intersection(
12996                                 pkgsettings["PORTAGE_USE"].split()) != valuesmap["USE"]:
12997                                 diff_values["USE"] = valuesmap["USE"]
12998                         # If a difference was found, print the info for
12999                         # this package.
13000                         if diff_values:
13001                                 # Print package info
13002                                 print "%s was built with the following:" % pkg
13003                                 for myvar in mydesiredvars + ["USE"]:
13004                                         if myvar in diff_values:
13005                                                 mylist = list(diff_values[myvar])
13006                                                 mylist.sort()
13007                                                 print "%s=\"%s\"" % (myvar, " ".join(mylist))
13008                                 print
13009                         print ">>> Attempting to run pkg_info() for '%s'" % pkg
13010                         ebuildpath = vardb.findname(pkg)
13011                         if not ebuildpath or not os.path.exists(ebuildpath):
13012                                 out.ewarn("No ebuild found for '%s'" % pkg)
13013                                 continue
13014                         portage.doebuild(ebuildpath, "info", pkgsettings["ROOT"],
13015                                 pkgsettings, debug=(settings.get("PORTAGE_DEBUG", "") == 1),
13016                                 mydbapi=trees[settings["ROOT"]]["vartree"].dbapi,
13017                                 tree="vartree")
13018
13019 def action_search(root_config, myopts, myfiles, spinner):
13020         if not myfiles:
13021                 print "emerge: no search terms provided."
13022         else:
13023                 searchinstance = search(root_config,
13024                         spinner, "--searchdesc" in myopts,
13025                         "--quiet" not in myopts, "--usepkg" in myopts,
13026                         "--usepkgonly" in myopts)
13027                 for mysearch in myfiles:
13028                         try:
13029                                 searchinstance.execute(mysearch)
13030                         except re.error, comment:
13031                                 print "\n!!! Regular expression error in \"%s\": %s" % ( mysearch, comment )
13032                                 sys.exit(1)
13033                         searchinstance.output()
13034
13035 def action_depclean(settings, trees, ldpath_mtimes,
13036         myopts, action, myfiles, spinner):
13037         # Kill packages that aren't explicitly merged or are required as a
13038         # dependency of another package. World file is explicit.
13039
13040         # Global depclean or prune operations are not very safe when there are
13041         # missing dependencies since it's unknown how badly incomplete
13042         # the dependency graph is, and we might accidentally remove packages
13043         # that should have been pulled into the graph. On the other hand, it's
13044         # relatively safe to ignore missing deps when only asked to remove
13045         # specific packages.
13046         allow_missing_deps = len(myfiles) > 0
13047
13048         msg = []
13049         msg.append("Always study the list of packages to be cleaned for any obvious\n")
13050         msg.append("mistakes. Packages that are part of the world set will always\n")
13051         msg.append("be kept.  They can be manually added to this set with\n")
13052         msg.append(good("`emerge --noreplace <atom>`") + ".  Packages that are listed in\n")
13053         msg.append("package.provided (see portage(5)) will be removed by\n")
13054         msg.append("depclean, even if they are part of the world set.\n")
13055         msg.append("\n")
13056         msg.append("As a safety measure, depclean will not remove any packages\n")
13057         msg.append("unless *all* required dependencies have been resolved.  As a\n")
13058         msg.append("consequence, it is often necessary to run %s\n" % \
13059                 good("`emerge --update"))
13060         msg.append(good("--newuse --deep @system @world`") + \
13061                 " prior to depclean.\n")
13062
13063         if action == "depclean" and "--quiet" not in myopts and not myfiles:
13064                 portage.writemsg_stdout("\n")
13065                 for x in msg:
13066                         portage.writemsg_stdout(colorize("WARN", " * ") + x)
13067
13068         xterm_titles = "notitles" not in settings.features
13069         myroot = settings["ROOT"]
13070         root_config = trees[myroot]["root_config"]
13071         getSetAtoms = root_config.setconfig.getSetAtoms
13072         vardb = trees[myroot]["vartree"].dbapi
13073
13074         required_set_names = ("system", "world")
13075         required_sets = {}
13076         set_args = []
13077
13078         for s in required_set_names:
13079                 required_sets[s] = InternalPackageSet(
13080                         initial_atoms=getSetAtoms(s))
13081
13082         
13083         # When removing packages, use a temporary version of world
13084         # which excludes packages that are intended to be eligible for
13085         # removal.
13086         world_temp_set = required_sets["world"]
13087         system_set = required_sets["system"]
13088
13089         if not system_set or not world_temp_set:
13090
13091                 if not system_set:
13092                         writemsg_level("!!! You have no system list.\n",
13093                                 level=logging.ERROR, noiselevel=-1)
13094
13095                 if not world_temp_set:
13096                         writemsg_level("!!! You have no world file.\n",
13097                                         level=logging.WARNING, noiselevel=-1)
13098
13099                 writemsg_level("!!! Proceeding is likely to " + \
13100                         "break your installation.\n",
13101                         level=logging.WARNING, noiselevel=-1)
13102                 if "--pretend" not in myopts:
13103                         countdown(int(settings["EMERGE_WARNING_DELAY"]), ">>> Depclean")
13104
13105         if action == "depclean":
13106                 emergelog(xterm_titles, " >>> depclean")
13107
13108         import textwrap
13109         args_set = InternalPackageSet()
13110         if myfiles:
13111                 for x in myfiles:
13112                         if not is_valid_package_atom(x):
13113                                 writemsg_level("!!! '%s' is not a valid package atom.\n" % x,
13114                                         level=logging.ERROR, noiselevel=-1)
13115                                 writemsg_level("!!! Please check ebuild(5) for full details.\n")
13116                                 return
13117                         try:
13118                                 atom = portage.dep_expand(x, mydb=vardb, settings=settings)
13119                         except portage.exception.AmbiguousPackageName, e:
13120                                 msg = "The short ebuild name \"" + x + \
13121                                         "\" is ambiguous.  Please specify " + \
13122                                         "one of the following " + \
13123                                         "fully-qualified ebuild names instead:"
13124                                 for line in textwrap.wrap(msg, 70):
13125                                         writemsg_level("!!! %s\n" % (line,),
13126                                                 level=logging.ERROR, noiselevel=-1)
13127                                 for i in e[0]:
13128                                         writemsg_level("    %s\n" % colorize("INFORM", i),
13129                                                 level=logging.ERROR, noiselevel=-1)
13130                                 writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
13131                                 return
13132                         args_set.add(atom)
13133                 matched_packages = False
13134                 for x in args_set:
13135                         if vardb.match(x):
13136                                 matched_packages = True
13137                                 break
13138                 if not matched_packages:
13139                         writemsg_level(">>> No packages selected for removal by %s\n" % \
13140                                 action)
13141                         return
13142
13143         writemsg_level("\nCalculating dependencies  ")
13144         resolver_params = create_depgraph_params(myopts, "remove")
13145         resolver = depgraph(settings, trees, myopts, resolver_params, spinner)
13146         vardb = resolver.trees[myroot]["vartree"].dbapi
13147
13148         if action == "depclean":
13149
13150                 if args_set:
13151                         # Pull in everything that's installed but not matched
13152                         # by an argument atom since we don't want to clean any
13153                         # package if something depends on it.
13154
13155                         world_temp_set.clear()
13156                         for pkg in vardb:
13157                                 spinner.update()
13158
13159                                 try:
13160                                         if args_set.findAtomForPackage(pkg) is None:
13161                                                 world_temp_set.add("=" + pkg.cpv)
13162                                                 continue
13163                                 except portage.exception.InvalidDependString, e:
13164                                         show_invalid_depstring_notice(pkg,
13165                                                 pkg.metadata["PROVIDE"], str(e))
13166                                         del e
13167                                         world_temp_set.add("=" + pkg.cpv)
13168                                         continue
13169
13170         elif action == "prune":
13171
13172                 # Pull in everything that's installed since we don't
13173                 # to prune a package if something depends on it.
13174                 world_temp_set.clear()
13175                 world_temp_set.update(vardb.cp_all())
13176
13177                 if not args_set:
13178
13179                         # Try to prune everything that's slotted.
13180                         for cp in vardb.cp_all():
13181                                 if len(vardb.cp_list(cp)) > 1:
13182                                         args_set.add(cp)
13183
13184                 # Remove atoms from world that match installed packages
13185                 # that are also matched by argument atoms, but do not remove
13186                 # them if they match the highest installed version.
13187                 for pkg in vardb:
13188                         spinner.update()
13189                         pkgs_for_cp = vardb.match_pkgs(pkg.cp)
13190                         if not pkgs_for_cp or pkg not in pkgs_for_cp:
13191                                 raise AssertionError("package expected in matches: " + \
13192                                         "cp = %s, cpv = %s matches = %s" % \
13193                                         (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
13194
13195                         highest_version = pkgs_for_cp[-1]
13196                         if pkg == highest_version:
13197                                 # pkg is the highest version
13198                                 world_temp_set.add("=" + pkg.cpv)
13199                                 continue
13200
13201                         if len(pkgs_for_cp) <= 1:
13202                                 raise AssertionError("more packages expected: " + \
13203                                         "cp = %s, cpv = %s matches = %s" % \
13204                                         (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
13205
13206                         try:
13207                                 if args_set.findAtomForPackage(pkg) is None:
13208                                         world_temp_set.add("=" + pkg.cpv)
13209                                         continue
13210                         except portage.exception.InvalidDependString, e:
13211                                 show_invalid_depstring_notice(pkg,
13212                                         pkg.metadata["PROVIDE"], str(e))
13213                                 del e
13214                                 world_temp_set.add("=" + pkg.cpv)
13215                                 continue
13216
13217         set_args = {}
13218         for s, package_set in required_sets.iteritems():
13219                 set_atom = SETPREFIX + s
13220                 set_arg = SetArg(arg=set_atom, set=package_set,
13221                         root_config=resolver.roots[myroot])
13222                 set_args[s] = set_arg
13223                 for atom in set_arg.set:
13224                         resolver._dep_stack.append(
13225                                 Dependency(atom=atom, root=myroot, parent=set_arg))
13226                         resolver.digraph.add(set_arg, None)
13227
13228         success = resolver._complete_graph()
13229         writemsg_level("\b\b... done!\n")
13230
13231         resolver.display_problems()
13232
13233         if not success:
13234                 return 1
13235
13236         def unresolved_deps():
13237
13238                 unresolvable = set()
13239                 for dep in resolver._initially_unsatisfied_deps:
13240                         if isinstance(dep.parent, Package) and \
13241                                 (dep.priority > UnmergeDepPriority.SOFT):
13242                                 unresolvable.add((dep.atom, dep.parent.cpv))
13243
13244                 if not unresolvable:
13245                         return False
13246
13247                 if unresolvable and not allow_missing_deps:
13248                         prefix = bad(" * ")
13249                         msg = []
13250                         msg.append("Dependencies could not be completely resolved due to")
13251                         msg.append("the following required packages not being installed:")
13252                         msg.append("")
13253                         for atom, parent in unresolvable:
13254                                 msg.append("  %s pulled in by:" % (atom,))
13255                                 msg.append("    %s" % (parent,))
13256                                 msg.append("")
13257                         msg.append("Have you forgotten to run " + \
13258                                 good("`emerge --update --newuse --deep @system @world`") + " prior")
13259                         msg.append(("to %s? It may be necessary to manually " + \
13260                                 "uninstall packages that no longer") % action)
13261                         msg.append("exist in the portage tree since " + \
13262                                 "it may not be possible to satisfy their")
13263                         msg.append("dependencies.  Also, be aware of " + \
13264                                 "the --with-bdeps option that is documented")
13265                         msg.append("in " + good("`man emerge`") + ".")
13266                         if action == "prune":
13267                                 msg.append("")
13268                                 msg.append("If you would like to ignore " + \
13269                                         "dependencies then use %s." % good("--nodeps"))
13270                         writemsg_level("".join("%s%s\n" % (prefix, line) for line in msg),
13271                                 level=logging.ERROR, noiselevel=-1)
13272                         return True
13273                 return False
13274
13275         if unresolved_deps():
13276                 return 1
13277
13278         graph = resolver.digraph.copy()
13279         required_pkgs_total = 0
13280         for node in graph:
13281                 if isinstance(node, Package):
13282                         required_pkgs_total += 1
13283
13284         def show_parents(child_node):
13285                 parent_nodes = graph.parent_nodes(child_node)
13286                 if not parent_nodes:
13287                         # With --prune, the highest version can be pulled in without any
13288                         # real parent since all installed packages are pulled in.  In that
13289                         # case there's nothing to show here.
13290                         return
13291                 parent_strs = []
13292                 for node in parent_nodes:
13293                         parent_strs.append(str(getattr(node, "cpv", node)))
13294                 parent_strs.sort()
13295                 msg = []
13296                 msg.append("  %s pulled in by:\n" % (child_node.cpv,))
13297                 for parent_str in parent_strs:
13298                         msg.append("    %s\n" % (parent_str,))
13299                 msg.append("\n")
13300                 portage.writemsg_stdout("".join(msg), noiselevel=-1)
13301
13302         def create_cleanlist():
13303                 pkgs_to_remove = []
13304
13305                 if action == "depclean":
13306                         if args_set:
13307
13308                                 for pkg in vardb:
13309                                         arg_atom = None
13310                                         try:
13311                                                 arg_atom = args_set.findAtomForPackage(pkg)
13312                                         except portage.exception.InvalidDependString:
13313                                                 # this error has already been displayed by now
13314                                                 continue
13315
13316                                         if arg_atom:
13317                                                 if pkg not in graph:
13318                                                         pkgs_to_remove.append(pkg)
13319                                                 elif "--verbose" in myopts:
13320                                                         show_parents(pkg)
13321
13322                         else:
13323                                 for pkg in vardb:
13324                                         if pkg not in graph:
13325                                                 pkgs_to_remove.append(pkg)
13326                                         elif "--verbose" in myopts:
13327                                                 show_parents(pkg)
13328
13329                 elif action == "prune":
13330                         # Prune really uses all installed instead of world. It's not
13331                         # a real reverse dependency so don't display it as such.
13332                         graph.remove(set_args["world"])
13333
13334                         for atom in args_set:
13335                                 for pkg in vardb.match_pkgs(atom):
13336                                         if pkg not in graph:
13337                                                 pkgs_to_remove.append(pkg)
13338                                         elif "--verbose" in myopts:
13339                                                 show_parents(pkg)
13340
13341                 if not pkgs_to_remove:
13342                         writemsg_level(
13343                                 ">>> No packages selected for removal by %s\n" % action)
13344                         if "--verbose" not in myopts:
13345                                 writemsg_level(
13346                                         ">>> To see reverse dependencies, use %s\n" % \
13347                                                 good("--verbose"))
13348                         if action == "prune":
13349                                 writemsg_level(
13350                                         ">>> To ignore dependencies, use %s\n" % \
13351                                                 good("--nodeps"))
13352
13353                 return pkgs_to_remove
13354
13355         cleanlist = create_cleanlist()
13356
13357         if len(cleanlist):
13358                 clean_set = set(cleanlist)
13359
13360                 # Check if any of these package are the sole providers of libraries
13361                 # with consumers that have not been selected for removal. If so, these
13362                 # packages and any dependencies need to be added to the graph.
13363                 real_vardb = trees[myroot]["vartree"].dbapi
13364                 linkmap = real_vardb.linkmap
13365                 liblist = linkmap.listLibraryObjects()
13366                 consumer_cache = {}
13367                 provider_cache = {}
13368                 soname_cache = {}
13369                 consumer_map = {}
13370
13371                 writemsg_level(">>> Checking for lib consumers...\n")
13372
13373                 for pkg in cleanlist:
13374                         pkg_dblink = real_vardb._dblink(pkg.cpv)
13375                         provided_libs = set()
13376
13377                         for lib in liblist:
13378                                 if pkg_dblink.isowner(lib, myroot):
13379                                         provided_libs.add(lib)
13380
13381                         if not provided_libs:
13382                                 continue
13383
13384                         consumers = {}
13385                         for lib in provided_libs:
13386                                 lib_consumers = consumer_cache.get(lib)
13387                                 if lib_consumers is None:
13388                                         lib_consumers = linkmap.findConsumers(lib)
13389                                         consumer_cache[lib] = lib_consumers
13390                                 if lib_consumers:
13391                                         consumers[lib] = lib_consumers
13392
13393                         if not consumers:
13394                                 continue
13395
13396                         for lib, lib_consumers in consumers.items():
13397                                 for consumer_file in list(lib_consumers):
13398                                         if pkg_dblink.isowner(consumer_file, myroot):
13399                                                 lib_consumers.remove(consumer_file)
13400                                 if not lib_consumers:
13401                                         del consumers[lib]
13402
13403                         if not consumers:
13404                                 continue
13405
13406                         for lib, lib_consumers in consumers.iteritems():
13407
13408                                 soname = soname_cache.get(lib)
13409                                 if soname is None:
13410                                         soname = linkmap.getSoname(lib)
13411                                         soname_cache[lib] = soname
13412
13413                                 consumer_providers = []
13414                                 for lib_consumer in lib_consumers:
13415                                         providers = provider_cache.get(lib)
13416                                         if providers is None:
13417                                                 providers = linkmap.findProviders(lib_consumer)
13418                                                 provider_cache[lib_consumer] = providers
13419                                         if soname not in providers:
13420                                                 # Why does this happen?
13421                                                 continue
13422                                         consumer_providers.append(
13423                                                 (lib_consumer, providers[soname]))
13424
13425                                 consumers[lib] = consumer_providers
13426
13427                         consumer_map[pkg] = consumers
13428
13429                 if consumer_map:
13430
13431                         search_files = set()
13432                         for consumers in consumer_map.itervalues():
13433                                 for lib, consumer_providers in consumers.iteritems():
13434                                         for lib_consumer, providers in consumer_providers:
13435                                                 search_files.add(lib_consumer)
13436                                                 search_files.update(providers)
13437
13438                         writemsg_level(">>> Assigning files to packages...\n")
13439                         file_owners = real_vardb._owners.getFileOwnerMap(search_files)
13440
13441                         for pkg, consumers in consumer_map.items():
13442                                 for lib, consumer_providers in consumers.items():
13443                                         lib_consumers = set()
13444
13445                                         for lib_consumer, providers in consumer_providers:
13446                                                 owner_set = file_owners.get(lib_consumer)
13447                                                 provider_dblinks = set()
13448                                                 provider_pkgs = set()
13449
13450                                                 if len(providers) > 1:
13451                                                         for provider in providers:
13452                                                                 provider_set = file_owners.get(provider)
13453                                                                 if provider_set is not None:
13454                                                                         provider_dblinks.update(provider_set)
13455
13456                                                 if len(provider_dblinks) > 1:
13457                                                         for provider_dblink in provider_dblinks:
13458                                                                 pkg_key = ("installed", myroot,
13459                                                                         provider_dblink.mycpv, "nomerge")
13460                                                                 if pkg_key not in clean_set:
13461                                                                         provider_pkgs.add(vardb.get(pkg_key))
13462
13463                                                 if provider_pkgs:
13464                                                         continue
13465
13466                                                 if owner_set is not None:
13467                                                         lib_consumers.update(owner_set)
13468
13469                                         for consumer_dblink in list(lib_consumers):
13470                                                 if ("installed", myroot, consumer_dblink.mycpv,
13471                                                         "nomerge") in clean_set:
13472                                                         lib_consumers.remove(consumer_dblink)
13473                                                         continue
13474
13475                                         if lib_consumers:
13476                                                 consumers[lib] = lib_consumers
13477                                         else:
13478                                                 del consumers[lib]
13479                                 if not consumers:
13480                                         del consumer_map[pkg]
13481
13482                 if consumer_map:
13483                         # TODO: Implement a package set for rebuilding consumer packages.
13484
13485                         msg = "In order to avoid breakage of link level " + \
13486                                 "dependencies, one or more packages will not be removed. " + \
13487                                 "This can be solved by rebuilding " + \
13488                                 "the packages that pulled them in."
13489
13490                         prefix = bad(" * ")
13491                         from textwrap import wrap
13492                         writemsg_level("".join(prefix + "%s\n" % line for \
13493                                 line in wrap(msg, 70)), level=logging.WARNING, noiselevel=-1)
13494
13495                         msg = []
13496                         for pkg, consumers in consumer_map.iteritems():
13497                                 unique_consumers = set(chain(*consumers.values()))
13498                                 unique_consumers = sorted(consumer.mycpv \
13499                                         for consumer in unique_consumers)
13500                                 msg.append("")
13501                                 msg.append("  %s pulled in by:" % (pkg.cpv,))
13502                                 for consumer in unique_consumers:
13503                                         msg.append("    %s" % (consumer,))
13504                         msg.append("")
13505                         writemsg_level("".join(prefix + "%s\n" % line for line in msg),
13506                                 level=logging.WARNING, noiselevel=-1)
13507
13508                         # Add lib providers to the graph as children of lib consumers,
13509                         # and also add any dependencies pulled in by the provider.
13510                         writemsg_level(">>> Adding lib providers to graph...\n")
13511
13512                         for pkg, consumers in consumer_map.iteritems():
13513                                 for consumer_dblink in set(chain(*consumers.values())):
13514                                         consumer_pkg = vardb.get(("installed", myroot,
13515                                                 consumer_dblink.mycpv, "nomerge"))
13516                                         if not resolver._add_pkg(pkg,
13517                                                 Dependency(parent=consumer_pkg,
13518                                                 priority=UnmergeDepPriority(runtime=True),
13519                                                 root=pkg.root)):
13520                                                 resolver.display_problems()
13521                                                 return 1
13522
13523                         writemsg_level("\nCalculating dependencies  ")
13524                         success = resolver._complete_graph()
13525                         writemsg_level("\b\b... done!\n")
13526                         resolver.display_problems()
13527                         if not success:
13528                                 return 1
13529                         if unresolved_deps():
13530                                 return 1
13531
13532                         graph = resolver.digraph.copy()
13533                         required_pkgs_total = 0
13534                         for node in graph:
13535                                 if isinstance(node, Package):
13536                                         required_pkgs_total += 1
13537                         cleanlist = create_cleanlist()
13538                         if not cleanlist:
13539                                 return 0
13540                         clean_set = set(cleanlist)
13541
13542                 # Use a topological sort to create an unmerge order such that
13543                 # each package is unmerged before it's dependencies. This is
13544                 # necessary to avoid breaking things that may need to run
13545                 # during pkg_prerm or pkg_postrm phases.
13546
13547                 # Create a new graph to account for dependencies between the
13548                 # packages being unmerged.
13549                 graph = digraph()
13550                 del cleanlist[:]
13551
13552                 dep_keys = ["DEPEND", "RDEPEND", "PDEPEND"]
13553                 runtime = UnmergeDepPriority(runtime=True)
13554                 runtime_post = UnmergeDepPriority(runtime_post=True)
13555                 buildtime = UnmergeDepPriority(buildtime=True)
13556                 priority_map = {
13557                         "RDEPEND": runtime,
13558                         "PDEPEND": runtime_post,
13559                         "DEPEND": buildtime,
13560                 }
13561
13562                 for node in clean_set:
13563                         graph.add(node, None)
13564                         mydeps = []
13565                         node_use = node.metadata["USE"].split()
13566                         for dep_type in dep_keys:
13567                                 depstr = node.metadata[dep_type]
13568                                 if not depstr:
13569                                         continue
13570                                 try:
13571                                         portage.dep._dep_check_strict = False
13572                                         success, atoms = portage.dep_check(depstr, None, settings,
13573                                                 myuse=node_use, trees=resolver._graph_trees,
13574                                                 myroot=myroot)
13575                                 finally:
13576                                         portage.dep._dep_check_strict = True
13577                                 if not success:
13578                                         # Ignore invalid deps of packages that will
13579                                         # be uninstalled anyway.
13580                                         continue
13581
13582                                 priority = priority_map[dep_type]
13583                                 for atom in atoms:
13584                                         if not isinstance(atom, portage.dep.Atom):
13585                                                 # Ignore invalid atoms returned from dep_check().
13586                                                 continue
13587                                         if atom.blocker:
13588                                                 continue
13589                                         matches = vardb.match_pkgs(atom)
13590                                         if not matches:
13591                                                 continue
13592                                         for child_node in matches:
13593                                                 if child_node in clean_set:
13594                                                         graph.add(child_node, node, priority=priority)
13595
13596                 ordered = True
13597                 if len(graph.order) == len(graph.root_nodes()):
13598                         # If there are no dependencies between packages
13599                         # let unmerge() group them by cat/pn.
13600                         ordered = False
13601                         cleanlist = [pkg.cpv for pkg in graph.order]
13602                 else:
13603                         # Order nodes from lowest to highest overall reference count for
13604                         # optimal root node selection.
13605                         node_refcounts = {}
13606                         for node in graph.order:
13607                                 node_refcounts[node] = len(graph.parent_nodes(node))
13608                         def cmp_reference_count(node1, node2):
13609                                 return node_refcounts[node1] - node_refcounts[node2]
13610                         graph.order.sort(cmp_reference_count)
13611         
13612                         ignore_priority_range = [None]
13613                         ignore_priority_range.extend(
13614                                 xrange(UnmergeDepPriority.MIN, UnmergeDepPriority.MAX + 1))
13615                         while not graph.empty():
13616                                 for ignore_priority in ignore_priority_range:
13617                                         nodes = graph.root_nodes(ignore_priority=ignore_priority)
13618                                         if nodes:
13619                                                 break
13620                                 if not nodes:
13621                                         raise AssertionError("no root nodes")
13622                                 if ignore_priority is not None:
13623                                         # Some deps have been dropped due to circular dependencies,
13624                                         # so only pop one node in order do minimize the number that
13625                                         # are dropped.
13626                                         del nodes[1:]
13627                                 for node in nodes:
13628                                         graph.remove(node)
13629                                         cleanlist.append(node.cpv)
13630
13631                 unmerge(root_config, myopts, "unmerge", cleanlist,
13632                         ldpath_mtimes, ordered=ordered)
13633
13634         if action == "prune":
13635                 return
13636
13637         if not cleanlist and "--quiet" in myopts:
13638                 return
13639
13640         print "Packages installed:   "+str(len(vardb.cpv_all()))
13641         print "Packages in world:    " + \
13642                 str(len(root_config.sets["world"].getAtoms()))
13643         print "Packages in system:   " + \
13644                 str(len(root_config.sets["system"].getAtoms()))
13645         print "Required packages:    "+str(required_pkgs_total)
13646         if "--pretend" in myopts:
13647                 print "Number to remove:     "+str(len(cleanlist))
13648         else:
13649                 print "Number removed:       "+str(len(cleanlist))
13650
13651 def resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner,
13652         skip_masked=False, skip_unsatisfied=False):
13653         """
13654         Construct a depgraph for the given resume list. This will raise
13655         PackageNotFound or depgraph.UnsatisfiedResumeDep when necessary.
13656         @rtype: tuple
13657         @returns: (success, depgraph, dropped_tasks)
13658         """
13659         mergelist = mtimedb["resume"]["mergelist"]
13660         dropped_tasks = set()
13661         while True:
13662                 mydepgraph = depgraph(settings, trees,
13663                         myopts, myparams, spinner)
13664                 try:
13665                         success = mydepgraph.loadResumeCommand(mtimedb["resume"],
13666                                 skip_masked=skip_masked)
13667                 except depgraph.UnsatisfiedResumeDep, e:
13668                         if not skip_unsatisfied:
13669                                 raise
13670
13671                         graph = mydepgraph.digraph
13672                         unsatisfied_parents = dict((dep.parent, dep.parent) \
13673                                 for dep in e.value)
13674                         traversed_nodes = set()
13675                         unsatisfied_stack = list(unsatisfied_parents)
13676                         while unsatisfied_stack:
13677                                 pkg = unsatisfied_stack.pop()
13678                                 if pkg in traversed_nodes:
13679                                         continue
13680                                 traversed_nodes.add(pkg)
13681
13682                                 # If this package was pulled in by a parent
13683                                 # package scheduled for merge, removing this
13684                                 # package may cause the the parent package's
13685                                 # dependency to become unsatisfied.
13686                                 for parent_node in graph.parent_nodes(pkg):
13687                                         if not isinstance(parent_node, Package) \
13688                                                 or parent_node.operation not in ("merge", "nomerge"):
13689                                                 continue
13690                                         unsatisfied = \
13691                                                 graph.child_nodes(parent_node,
13692                                                 ignore_priority=DepPriority.SOFT)
13693                                         if pkg in unsatisfied:
13694                                                 unsatisfied_parents[parent_node] = parent_node
13695                                                 unsatisfied_stack.append(parent_node)
13696
13697                         pruned_mergelist = [x for x in mergelist \
13698                                 if isinstance(x, list) and \
13699                                 tuple(x) not in unsatisfied_parents]
13700
13701                         # If the mergelist doesn't shrink then this loop is infinite.
13702                         if len(pruned_mergelist) == len(mergelist):
13703                                 # This happens if a package can't be dropped because
13704                                 # it's already installed, but it has unsatisfied PDEPEND.
13705                                 raise
13706                         mergelist[:] = pruned_mergelist
13707
13708                         # Exclude installed packages that have been removed from the graph due
13709                         # to failure to build/install runtime dependencies after the dependent
13710                         # package has already been installed.
13711                         dropped_tasks.update(pkg for pkg in \
13712                                 unsatisfied_parents if pkg.operation != "nomerge")
13713                         mydepgraph.break_refs(unsatisfied_parents)
13714
13715                         del e, graph, traversed_nodes, \
13716                                 unsatisfied_parents, unsatisfied_stack
13717                         continue
13718                 else:
13719                         break
13720         return (success, mydepgraph, dropped_tasks)
13721
13722 def action_build(settings, trees, mtimedb,
13723         myopts, myaction, myfiles, spinner):
13724
13725         # validate the state of the resume data
13726         # so that we can make assumptions later.
13727         for k in ("resume", "resume_backup"):
13728                 if k not in mtimedb:
13729                         continue
13730                 resume_data = mtimedb[k]
13731                 if not isinstance(resume_data, dict):
13732                         del mtimedb[k]
13733                         continue
13734                 mergelist = resume_data.get("mergelist")
13735                 if not isinstance(mergelist, list):
13736                         del mtimedb[k]
13737                         continue
13738                 for x in mergelist:
13739                         if not (isinstance(x, list) and len(x) == 4):
13740                                 continue
13741                         pkg_type, pkg_root, pkg_key, pkg_action = x
13742                         if pkg_root not in trees:
13743                                 # Current $ROOT setting differs,
13744                                 # so the list must be stale.
13745                                 mergelist = None
13746                                 break
13747                 if not mergelist:
13748                         del mtimedb[k]
13749                         continue
13750                 resume_opts = resume_data.get("myopts")
13751                 if not isinstance(resume_opts, (dict, list)):
13752                         del mtimedb[k]
13753                         continue
13754                 favorites = resume_data.get("favorites")
13755                 if not isinstance(favorites, list):
13756                         del mtimedb[k]
13757                         continue
13758
13759         resume = False
13760         if "--resume" in myopts and \
13761                 ("resume" in mtimedb or
13762                 "resume_backup" in mtimedb):
13763                 resume = True
13764                 if "resume" not in mtimedb:
13765                         mtimedb["resume"] = mtimedb["resume_backup"]
13766                         del mtimedb["resume_backup"]
13767                         mtimedb.commit()
13768                 # "myopts" is a list for backward compatibility.
13769                 resume_opts = mtimedb["resume"].get("myopts", [])
13770                 if isinstance(resume_opts, list):
13771                         resume_opts = dict((k,True) for k in resume_opts)
13772                 for opt in ("--ask", "--color", "--skipfirst", "--tree"):
13773                         resume_opts.pop(opt, None)
13774                 myopts.update(resume_opts)
13775
13776                 if "--debug" in myopts:
13777                         writemsg_level("myopts %s\n" % (myopts,))
13778
13779                 # Adjust config according to options of the command being resumed.
13780                 for myroot in trees:
13781                         mysettings =  trees[myroot]["vartree"].settings
13782                         mysettings.unlock()
13783                         adjust_config(myopts, mysettings)
13784                         mysettings.lock()
13785                         del myroot, mysettings
13786
13787         ldpath_mtimes = mtimedb["ldpath"]
13788         favorites=[]
13789         merge_count = 0
13790         buildpkgonly = "--buildpkgonly" in myopts
13791         pretend = "--pretend" in myopts
13792         fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
13793         ask = "--ask" in myopts
13794         nodeps = "--nodeps" in myopts
13795         oneshot = "--oneshot" in myopts or "--onlydeps" in myopts
13796         tree = "--tree" in myopts
13797         if nodeps and tree:
13798                 tree = False
13799                 del myopts["--tree"]
13800                 portage.writemsg(colorize("WARN", " * ") + \
13801                         "--tree is broken with --nodeps. Disabling...\n")
13802         debug = "--debug" in myopts
13803         verbose = "--verbose" in myopts
13804         quiet = "--quiet" in myopts
13805         if pretend or fetchonly:
13806                 # make the mtimedb readonly
13807                 mtimedb.filename = None
13808         if "--digest" in myopts:
13809                 msg = "The --digest option can prevent corruption from being" + \
13810                         " noticed. The `repoman manifest` command is the preferred" + \
13811                         " way to generate manifests and it is capable of doing an" + \
13812                         " entire repository or category at once."
13813                 prefix = bad(" * ")
13814                 writemsg(prefix + "\n")
13815                 from textwrap import wrap
13816                 for line in wrap(msg, 72):
13817                         writemsg("%s%s\n" % (prefix, line))
13818                 writemsg(prefix + "\n")
13819
13820         if "--quiet" not in myopts and \
13821                 ("--pretend" in myopts or "--ask" in myopts or \
13822                 "--tree" in myopts or "--verbose" in myopts):
13823                 action = ""
13824                 if "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
13825                         action = "fetched"
13826                 elif "--buildpkgonly" in myopts:
13827                         action = "built"
13828                 else:
13829                         action = "merged"
13830                 if "--tree" in myopts and action != "fetched": # Tree doesn't work with fetching
13831                         print
13832                         print darkgreen("These are the packages that would be %s, in reverse order:") % action
13833                         print
13834                 else:
13835                         print
13836                         print darkgreen("These are the packages that would be %s, in order:") % action
13837                         print
13838
13839         show_spinner = "--quiet" not in myopts and "--nodeps" not in myopts
13840         if not show_spinner:
13841                 spinner.update = spinner.update_quiet
13842
13843         if resume:
13844                 favorites = mtimedb["resume"].get("favorites")
13845                 if not isinstance(favorites, list):
13846                         favorites = []
13847
13848                 if show_spinner:
13849                         print "Calculating dependencies  ",
13850                 myparams = create_depgraph_params(myopts, myaction)
13851
13852                 resume_data = mtimedb["resume"]
13853                 mergelist = resume_data["mergelist"]
13854                 if mergelist and "--skipfirst" in myopts:
13855                         for i, task in enumerate(mergelist):
13856                                 if isinstance(task, list) and \
13857                                         task and task[-1] == "merge":
13858                                         del mergelist[i]
13859                                         break
13860
13861                 skip_masked      = "--skipfirst" in myopts
13862                 skip_unsatisfied = "--skipfirst" in myopts
13863                 success = False
13864                 mydepgraph = None
13865                 try:
13866                         success, mydepgraph, dropped_tasks = resume_depgraph(
13867                                 settings, trees, mtimedb, myopts, myparams, spinner,
13868                                 skip_masked=skip_masked, skip_unsatisfied=skip_unsatisfied)
13869                 except (portage.exception.PackageNotFound,
13870                         depgraph.UnsatisfiedResumeDep), e:
13871                         if isinstance(e, depgraph.UnsatisfiedResumeDep):
13872                                 mydepgraph = e.depgraph
13873                         if show_spinner:
13874                                 print
13875                         from textwrap import wrap
13876                         from portage.output import EOutput
13877                         out = EOutput()
13878
13879                         resume_data = mtimedb["resume"]
13880                         mergelist = resume_data.get("mergelist")
13881                         if not isinstance(mergelist, list):
13882                                 mergelist = []
13883                         if mergelist and debug or (verbose and not quiet):
13884                                 out.eerror("Invalid resume list:")
13885                                 out.eerror("")
13886                                 indent = "  "
13887                                 for task in mergelist:
13888                                         if isinstance(task, list):
13889                                                 out.eerror(indent + str(tuple(task)))
13890                                 out.eerror("")
13891
13892                         if isinstance(e, depgraph.UnsatisfiedResumeDep):
13893                                 out.eerror("One or more packages are either masked or " + \
13894                                         "have missing dependencies:")
13895                                 out.eerror("")
13896                                 indent = "  "
13897                                 for dep in e.value:
13898                                         if dep.atom is None:
13899                                                 out.eerror(indent + "Masked package:")
13900                                                 out.eerror(2 * indent + str(dep.parent))
13901                                                 out.eerror("")
13902                                         else:
13903                                                 out.eerror(indent + str(dep.atom) + " pulled in by:")
13904                                                 out.eerror(2 * indent + str(dep.parent))
13905                                                 out.eerror("")
13906                                 msg = "The resume list contains packages " + \
13907                                         "that are either masked or have " + \
13908                                         "unsatisfied dependencies. " + \
13909                                         "Please restart/continue " + \
13910                                         "the operation manually, or use --skipfirst " + \
13911                                         "to skip the first package in the list and " + \
13912                                         "any other packages that may be " + \
13913                                         "masked or have missing dependencies."
13914                                 for line in wrap(msg, 72):
13915                                         out.eerror(line)
13916                         elif isinstance(e, portage.exception.PackageNotFound):
13917                                 out.eerror("An expected package is " + \
13918                                         "not available: %s" % str(e))
13919                                 out.eerror("")
13920                                 msg = "The resume list contains one or more " + \
13921                                         "packages that are no longer " + \
13922                                         "available. Please restart/continue " + \
13923                                         "the operation manually."
13924                                 for line in wrap(msg, 72):
13925                                         out.eerror(line)
13926                 else:
13927                         if show_spinner:
13928                                 print "\b\b... done!"
13929
13930                 if success:
13931                         if dropped_tasks:
13932                                 portage.writemsg("!!! One or more packages have been " + \
13933                                         "dropped due to\n" + \
13934                                         "!!! masking or unsatisfied dependencies:\n\n",
13935                                         noiselevel=-1)
13936                                 for task in dropped_tasks:
13937                                         portage.writemsg("  " + str(task) + "\n", noiselevel=-1)
13938                                 portage.writemsg("\n", noiselevel=-1)
13939                         del dropped_tasks
13940                 else:
13941                         if mydepgraph is not None:
13942                                 mydepgraph.display_problems()
13943                         if not (ask or pretend):
13944                                 # delete the current list and also the backup
13945                                 # since it's probably stale too.
13946                                 for k in ("resume", "resume_backup"):
13947                                         mtimedb.pop(k, None)
13948                                 mtimedb.commit()
13949
13950                         return 1
13951         else:
13952                 if ("--resume" in myopts):
13953                         print darkgreen("emerge: It seems we have nothing to resume...")
13954                         return os.EX_OK
13955
13956                 myparams = create_depgraph_params(myopts, myaction)
13957                 if "--quiet" not in myopts and "--nodeps" not in myopts:
13958                         print "Calculating dependencies  ",
13959                         sys.stdout.flush()
13960                 mydepgraph = depgraph(settings, trees, myopts, myparams, spinner)
13961                 try:
13962                         retval, favorites = mydepgraph.select_files(myfiles)
13963                 except portage.exception.PackageNotFound, e:
13964                         portage.writemsg("\n!!! %s\n" % str(e), noiselevel=-1)
13965                         return 1
13966                 except portage.exception.PackageSetNotFound, e:
13967                         root_config = trees[settings["ROOT"]]["root_config"]
13968                         display_missing_pkg_set(root_config, e.value)
13969                         return 1
13970                 if show_spinner:
13971                         print "\b\b... done!"
13972                 if not retval:
13973                         mydepgraph.display_problems()
13974                         return 1
13975
13976         if "--pretend" not in myopts and \
13977                 ("--ask" in myopts or "--tree" in myopts or \
13978                 "--verbose" in myopts) and \
13979                 not ("--quiet" in myopts and "--ask" not in myopts):
13980                 if "--resume" in myopts:
13981                         mymergelist = mydepgraph.altlist()
13982                         if len(mymergelist) == 0:
13983                                 print colorize("INFORM", "emerge: It seems we have nothing to resume...")
13984                                 return os.EX_OK
13985                         favorites = mtimedb["resume"]["favorites"]
13986                         retval = mydepgraph.display(
13987                                 mydepgraph.altlist(reversed=tree),
13988                                 favorites=favorites)
13989                         mydepgraph.display_problems()
13990                         if retval != os.EX_OK:
13991                                 return retval
13992                         prompt="Would you like to resume merging these packages?"
13993                 else:
13994                         retval = mydepgraph.display(
13995                                 mydepgraph.altlist(reversed=("--tree" in myopts)),
13996                                 favorites=favorites)
13997                         mydepgraph.display_problems()
13998                         if retval != os.EX_OK:
13999                                 return retval
14000                         mergecount=0
14001                         for x in mydepgraph.altlist():
14002                                 if isinstance(x, Package) and x.operation == "merge":
14003                                         mergecount += 1
14004
14005                         if mergecount==0:
14006                                 sets = trees[settings["ROOT"]]["root_config"].sets
14007                                 world_candidates = None
14008                                 if "--noreplace" in myopts and \
14009                                         not oneshot and favorites:
14010                                         # Sets that are not world candidates are filtered
14011                                         # out here since the favorites list needs to be
14012                                         # complete for depgraph.loadResumeCommand() to
14013                                         # operate correctly.
14014                                         world_candidates = [x for x in favorites \
14015                                                 if not (x.startswith(SETPREFIX) and \
14016                                                 not sets[x[1:]].world_candidate)]
14017                                 if "--noreplace" in myopts and \
14018                                         not oneshot and world_candidates:
14019                                         print
14020                                         for x in world_candidates:
14021                                                 print " %s %s" % (good("*"), x)
14022                                         prompt="Would you like to add these packages to your world favorites?"
14023                                 elif settings["AUTOCLEAN"] and "yes"==settings["AUTOCLEAN"]:
14024                                         prompt="Nothing to merge; would you like to auto-clean packages?"
14025                                 else:
14026                                         print
14027                                         print "Nothing to merge; quitting."
14028                                         print
14029                                         return os.EX_OK
14030                         elif "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
14031                                 prompt="Would you like to fetch the source files for these packages?"
14032                         else:
14033                                 prompt="Would you like to merge these packages?"
14034                 print
14035                 if "--ask" in myopts and userquery(prompt) == "No":
14036                         print
14037                         print "Quitting."
14038                         print
14039                         return os.EX_OK
14040                 # Don't ask again (e.g. when auto-cleaning packages after merge)
14041                 myopts.pop("--ask", None)
14042
14043         if ("--pretend" in myopts) and not ("--fetchonly" in myopts or "--fetch-all-uri" in myopts):
14044                 if ("--resume" in myopts):
14045                         mymergelist = mydepgraph.altlist()
14046                         if len(mymergelist) == 0:
14047                                 print colorize("INFORM", "emerge: It seems we have nothing to resume...")
14048                                 return os.EX_OK
14049                         favorites = mtimedb["resume"]["favorites"]
14050                         retval = mydepgraph.display(
14051                                 mydepgraph.altlist(reversed=tree),
14052                                 favorites=favorites)
14053                         mydepgraph.display_problems()
14054                         if retval != os.EX_OK:
14055                                 return retval
14056                 else:
14057                         retval = mydepgraph.display(
14058                                 mydepgraph.altlist(reversed=("--tree" in myopts)),
14059                                 favorites=favorites)
14060                         mydepgraph.display_problems()
14061                         if retval != os.EX_OK:
14062                                 return retval
14063                         if "--buildpkgonly" in myopts:
14064                                 graph_copy = mydepgraph.digraph.clone()
14065                                 for node in list(graph_copy.order):
14066                                         if not isinstance(node, Package):
14067                                                 graph_copy.remove(node)
14068                                 if not graph_copy.hasallzeros(ignore_priority=DepPriority.MEDIUM):
14069                                         print "\n!!! --buildpkgonly requires all dependencies to be merged."
14070                                         print "!!! You have to merge the dependencies before you can build this package.\n"
14071                                         return 1
14072         else:
14073                 if "--buildpkgonly" in myopts:
14074                         graph_copy = mydepgraph.digraph.clone()
14075                         for node in list(graph_copy.order):
14076                                 if not isinstance(node, Package):
14077                                         graph_copy.remove(node)
14078                         if not graph_copy.hasallzeros(ignore_priority=DepPriority.MEDIUM):
14079                                 print "\n!!! --buildpkgonly requires all dependencies to be merged."
14080                                 print "!!! Cannot merge requested packages. Merge deps and try again.\n"
14081                                 return 1
14082
14083                 if ("--resume" in myopts):
14084                         favorites=mtimedb["resume"]["favorites"]
14085                         mymergelist = mydepgraph.altlist()
14086                         mydepgraph.break_refs(mymergelist)
14087                         mergetask = Scheduler(settings, trees, mtimedb, myopts,
14088                                 spinner, mymergelist, favorites, mydepgraph.schedulerGraph())
14089                         del mydepgraph, mymergelist
14090                         clear_caches(trees)
14091
14092                         retval = mergetask.merge()
14093                         merge_count = mergetask.curval
14094                 else:
14095                         if "resume" in mtimedb and \
14096                         "mergelist" in mtimedb["resume"] and \
14097                         len(mtimedb["resume"]["mergelist"]) > 1:
14098                                 mtimedb["resume_backup"] = mtimedb["resume"]
14099                                 del mtimedb["resume"]
14100                                 mtimedb.commit()
14101                         mtimedb["resume"]={}
14102                         # Stored as a dict starting with portage-2.1.6_rc1, and supported
14103                         # by >=portage-2.1.3_rc8. Versions <portage-2.1.3_rc8 only support
14104                         # a list type for options.
14105                         mtimedb["resume"]["myopts"] = myopts.copy()
14106
14107                         # Convert Atom instances to plain str since the mtimedb loader
14108                         # sets unpickler.find_global = None which causes unpickler.load()
14109                         # to raise the following exception:
14110                         #
14111                         # cPickle.UnpicklingError: Global and instance pickles are not supported.
14112                         #
14113                         # TODO: Maybe stop setting find_global = None, or find some other
14114                         # way to avoid accidental triggering of the above UnpicklingError.
14115                         mtimedb["resume"]["favorites"] = [str(x) for x in favorites]
14116
14117                         if ("--digest" in myopts) and not ("--fetchonly" in myopts or "--fetch-all-uri" in myopts):
14118                                 for pkgline in mydepgraph.altlist():
14119                                         if pkgline[0]=="ebuild" and pkgline[3]=="merge":
14120                                                 y = trees[pkgline[1]]["porttree"].dbapi.findname(pkgline[2])
14121                                                 tmpsettings = portage.config(clone=settings)
14122                                                 edebug = 0
14123                                                 if settings.get("PORTAGE_DEBUG", "") == "1":
14124                                                         edebug = 1
14125                                                 retval = portage.doebuild(
14126                                                         y, "digest", settings["ROOT"], tmpsettings, edebug,
14127                                                         ("--pretend" in myopts),
14128                                                         mydbapi=trees[pkgline[1]]["porttree"].dbapi,
14129                                                         tree="porttree")
14130
14131                         pkglist = mydepgraph.altlist()
14132                         mydepgraph.saveNomergeFavorites()
14133                         mydepgraph.break_refs(pkglist)
14134                         mergetask = Scheduler(settings, trees, mtimedb, myopts,
14135                                 spinner, pkglist, favorites, mydepgraph.schedulerGraph())
14136                         del mydepgraph, pkglist
14137                         clear_caches(trees)
14138
14139                         retval = mergetask.merge()
14140                         merge_count = mergetask.curval
14141
14142                 if retval == os.EX_OK and not (buildpkgonly or fetchonly or pretend):
14143                         if "yes" == settings.get("AUTOCLEAN"):
14144                                 portage.writemsg_stdout(">>> Auto-cleaning packages...\n")
14145                                 unmerge(trees[settings["ROOT"]]["root_config"],
14146                                         myopts, "clean", [],
14147                                         ldpath_mtimes, autoclean=1)
14148                         else:
14149                                 portage.writemsg_stdout(colorize("WARN", "WARNING:")
14150                                         + " AUTOCLEAN is disabled.  This can cause serious"
14151                                         + " problems due to overlapping packages.\n")
14152                         trees[settings["ROOT"]]["vartree"].dbapi.plib_registry.pruneNonExisting()
14153
14154                 return retval
14155
14156 def multiple_actions(action1, action2):
14157         sys.stderr.write("\n!!! Multiple actions requested... Please choose one only.\n")
14158         sys.stderr.write("!!! '%s' or '%s'\n\n" % (action1, action2))
14159         sys.exit(1)
14160
14161 def insert_optional_args(args):
14162         """
14163         Parse optional arguments and insert a value if one has
14164         not been provided. This is done before feeding the args
14165         to the optparse parser since that parser does not support
14166         this feature natively.
14167         """
14168
14169         new_args = []
14170         jobs_opts = ("-j", "--jobs")
14171         arg_stack = args[:]
14172         arg_stack.reverse()
14173         while arg_stack:
14174                 arg = arg_stack.pop()
14175
14176                 short_job_opt = bool("j" in arg and arg[:1] == "-" and arg[:2] != "--")
14177                 if not (short_job_opt or arg in jobs_opts):
14178                         new_args.append(arg)
14179                         continue
14180
14181                 # Insert an empty placeholder in order to
14182                 # satisfy the requirements of optparse.
14183
14184                 new_args.append("--jobs")
14185                 job_count = None
14186                 saved_opts = None
14187                 if short_job_opt and len(arg) > 2:
14188                         if arg[:2] == "-j":
14189                                 try:
14190                                         job_count = int(arg[2:])
14191                                 except ValueError:
14192                                         saved_opts = arg[2:]
14193                         else:
14194                                 job_count = "True"
14195                                 saved_opts = arg[1:].replace("j", "")
14196
14197                 if job_count is None and arg_stack:
14198                         try:
14199                                 job_count = int(arg_stack[-1])
14200                         except ValueError:
14201                                 pass
14202                         else:
14203                                 # Discard the job count from the stack
14204                                 # since we're consuming it here.
14205                                 arg_stack.pop()
14206
14207                 if job_count is None:
14208                         # unlimited number of jobs
14209                         new_args.append("True")
14210                 else:
14211                         new_args.append(str(job_count))
14212
14213                 if saved_opts is not None:
14214                         new_args.append("-" + saved_opts)
14215
14216         return new_args
14217
14218 def parse_opts(tmpcmdline, silent=False):
14219         myaction=None
14220         myopts = {}
14221         myfiles=[]
14222
14223         global actions, options, shortmapping
14224
14225         longopt_aliases = {"--cols":"--columns", "--skip-first":"--skipfirst"}
14226         argument_options = {
14227                 "--config-root": {
14228                         "help":"specify the location for portage configuration files",
14229                         "action":"store"
14230                 },
14231                 "--color": {
14232                         "help":"enable or disable color output",
14233                         "type":"choice",
14234                         "choices":("y", "n")
14235                 },
14236
14237                 "--jobs": {
14238
14239                         "help"   : "Specifies the number of packages to build " + \
14240                                 "simultaneously.",
14241
14242                         "action" : "store"
14243                 },
14244
14245                 "--load-average": {
14246
14247                         "help"   :"Specifies that no new builds should be started " + \
14248                                 "if there are other builds running and the load average " + \
14249                                 "is at least LOAD (a floating-point number).",
14250
14251                         "action" : "store"
14252                 },
14253
14254                 "--with-bdeps": {
14255                         "help":"include unnecessary build time dependencies",
14256                         "type":"choice",
14257                         "choices":("y", "n")
14258                 },
14259                 "--reinstall": {
14260                         "help":"specify conditions to trigger package reinstallation",
14261                         "type":"choice",
14262                         "choices":["changed-use"]
14263                 }
14264         }
14265
14266         from optparse import OptionParser
14267         parser = OptionParser()
14268         if parser.has_option("--help"):
14269                 parser.remove_option("--help")
14270
14271         for action_opt in actions:
14272                 parser.add_option("--" + action_opt, action="store_true",
14273                         dest=action_opt.replace("-", "_"), default=False)
14274         for myopt in options:
14275                 parser.add_option(myopt, action="store_true",
14276                         dest=myopt.lstrip("--").replace("-", "_"), default=False)
14277         for shortopt, longopt in shortmapping.iteritems():
14278                 parser.add_option("-" + shortopt, action="store_true",
14279                         dest=longopt.lstrip("--").replace("-", "_"), default=False)
14280         for myalias, myopt in longopt_aliases.iteritems():
14281                 parser.add_option(myalias, action="store_true",
14282                         dest=myopt.lstrip("--").replace("-", "_"), default=False)
14283
14284         for myopt, kwargs in argument_options.iteritems():
14285                 parser.add_option(myopt,
14286                         dest=myopt.lstrip("--").replace("-", "_"), **kwargs)
14287
14288         tmpcmdline = insert_optional_args(tmpcmdline)
14289
14290         myoptions, myargs = parser.parse_args(args=tmpcmdline)
14291
14292         if myoptions.jobs:
14293                 jobs = None
14294                 if myoptions.jobs == "True":
14295                         jobs = True
14296                 else:
14297                         try:
14298                                 jobs = int(myoptions.jobs)
14299                         except ValueError:
14300                                 jobs = -1
14301
14302                 if jobs is not True and \
14303                         jobs < 1:
14304                         jobs = None
14305                         if not silent:
14306                                 writemsg("!!! Invalid --jobs parameter: '%s'\n" % \
14307                                         (myoptions.jobs,), noiselevel=-1)
14308
14309                 myoptions.jobs = jobs
14310
14311         if myoptions.load_average:
14312                 try:
14313                         load_average = float(myoptions.load_average)
14314                 except ValueError:
14315                         load_average = 0.0
14316
14317                 if load_average <= 0.0:
14318                         load_average = None
14319                         if not silent:
14320                                 writemsg("!!! Invalid --load-average parameter: '%s'\n" % \
14321                                         (myoptions.load_average,), noiselevel=-1)
14322
14323                 myoptions.load_average = load_average
14324
14325         for myopt in options:
14326                 v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"))
14327                 if v:
14328                         myopts[myopt] = True
14329
14330         for myopt in argument_options:
14331                 v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"), None)
14332                 if v is not None:
14333                         myopts[myopt] = v
14334
14335         for action_opt in actions:
14336                 v = getattr(myoptions, action_opt.replace("-", "_"))
14337                 if v:
14338                         if myaction:
14339                                 multiple_actions(myaction, action_opt)
14340                                 sys.exit(1)
14341                         myaction = action_opt
14342
14343         myfiles += myargs
14344
14345         return myaction, myopts, myfiles
14346
14347 def validate_ebuild_environment(trees):
14348         for myroot in trees:
14349                 settings = trees[myroot]["vartree"].settings
14350                 settings.validate()
14351
14352 def clear_caches(trees):
14353         for d in trees.itervalues():
14354                 d["porttree"].dbapi.melt()
14355                 d["porttree"].dbapi._aux_cache.clear()
14356                 d["bintree"].dbapi._aux_cache.clear()
14357                 d["bintree"].dbapi._clear_cache()
14358                 d["vartree"].dbapi.linkmap._clear_cache()
14359         portage.dircache.clear()
14360         gc.collect()
14361
14362 def load_emerge_config(trees=None):
14363         kwargs = {}
14364         for k, envvar in (("config_root", "PORTAGE_CONFIGROOT"), ("target_root", "ROOT")):
14365                 v = os.environ.get(envvar, None)
14366                 if v and v.strip():
14367                         kwargs[k] = v
14368         trees = portage.create_trees(trees=trees, **kwargs)
14369
14370         for root, root_trees in trees.iteritems():
14371                 settings = root_trees["vartree"].settings
14372                 setconfig = load_default_config(settings, root_trees)
14373                 root_trees["root_config"] = RootConfig(settings, root_trees, setconfig)
14374
14375         settings = trees["/"]["vartree"].settings
14376
14377         for myroot in trees:
14378                 if myroot != "/":
14379                         settings = trees[myroot]["vartree"].settings
14380                         break
14381
14382         mtimedbfile = os.path.join("/", portage.CACHE_PATH.lstrip(os.path.sep), "mtimedb")
14383         mtimedb = portage.MtimeDB(mtimedbfile)
14384         
14385         return settings, trees, mtimedb
14386
14387 def adjust_config(myopts, settings):
14388         """Make emerge specific adjustments to the config."""
14389
14390         # To enhance usability, make some vars case insensitive by forcing them to
14391         # lower case.
14392         for myvar in ("AUTOCLEAN", "NOCOLOR"):
14393                 if myvar in settings:
14394                         settings[myvar] = settings[myvar].lower()
14395                         settings.backup_changes(myvar)
14396         del myvar
14397
14398         # Kill noauto as it will break merges otherwise.
14399         if "noauto" in settings.features:
14400                 while "noauto" in settings.features:
14401                         settings.features.remove("noauto")
14402                 settings["FEATURES"] = " ".join(settings.features)
14403                 settings.backup_changes("FEATURES")
14404
14405         CLEAN_DELAY = 5
14406         try:
14407                 CLEAN_DELAY = int(settings.get("CLEAN_DELAY", str(CLEAN_DELAY)))
14408         except ValueError, e:
14409                 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
14410                 portage.writemsg("!!! Unable to parse integer: CLEAN_DELAY='%s'\n" % \
14411                         settings["CLEAN_DELAY"], noiselevel=-1)
14412         settings["CLEAN_DELAY"] = str(CLEAN_DELAY)
14413         settings.backup_changes("CLEAN_DELAY")
14414
14415         EMERGE_WARNING_DELAY = 10
14416         try:
14417                 EMERGE_WARNING_DELAY = int(settings.get(
14418                         "EMERGE_WARNING_DELAY", str(EMERGE_WARNING_DELAY)))
14419         except ValueError, e:
14420                 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
14421                 portage.writemsg("!!! Unable to parse integer: EMERGE_WARNING_DELAY='%s'\n" % \
14422                         settings["EMERGE_WARNING_DELAY"], noiselevel=-1)
14423         settings["EMERGE_WARNING_DELAY"] = str(EMERGE_WARNING_DELAY)
14424         settings.backup_changes("EMERGE_WARNING_DELAY")
14425
14426         if "--quiet" in myopts:
14427                 settings["PORTAGE_QUIET"]="1"
14428                 settings.backup_changes("PORTAGE_QUIET")
14429
14430         if "--verbose" in myopts:
14431                 settings["PORTAGE_VERBOSE"] = "1"
14432                 settings.backup_changes("PORTAGE_VERBOSE")
14433
14434         # Set so that configs will be merged regardless of remembered status
14435         if ("--noconfmem" in myopts):
14436                 settings["NOCONFMEM"]="1"
14437                 settings.backup_changes("NOCONFMEM")
14438
14439         # Set various debug markers... They should be merged somehow.
14440         PORTAGE_DEBUG = 0
14441         try:
14442                 PORTAGE_DEBUG = int(settings.get("PORTAGE_DEBUG", str(PORTAGE_DEBUG)))
14443                 if PORTAGE_DEBUG not in (0, 1):
14444                         portage.writemsg("!!! Invalid value: PORTAGE_DEBUG='%i'\n" % \
14445                                 PORTAGE_DEBUG, noiselevel=-1)
14446                         portage.writemsg("!!! PORTAGE_DEBUG must be either 0 or 1\n",
14447                                 noiselevel=-1)
14448                         PORTAGE_DEBUG = 0
14449         except ValueError, e:
14450                 portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
14451                 portage.writemsg("!!! Unable to parse integer: PORTAGE_DEBUG='%s'\n" %\
14452                         settings["PORTAGE_DEBUG"], noiselevel=-1)
14453                 del e
14454         if "--debug" in myopts:
14455                 PORTAGE_DEBUG = 1
14456         settings["PORTAGE_DEBUG"] = str(PORTAGE_DEBUG)
14457         settings.backup_changes("PORTAGE_DEBUG")
14458
14459         if settings.get("NOCOLOR") not in ("yes","true"):
14460                 portage.output.havecolor = 1
14461
14462         """The explicit --color < y | n > option overrides the NOCOLOR environment
14463         variable and stdout auto-detection."""
14464         if "--color" in myopts:
14465                 if "y" == myopts["--color"]:
14466                         portage.output.havecolor = 1
14467                         settings["NOCOLOR"] = "false"
14468                 else:
14469                         portage.output.havecolor = 0
14470                         settings["NOCOLOR"] = "true"
14471                 settings.backup_changes("NOCOLOR")
14472         elif not sys.stdout.isatty() and settings.get("NOCOLOR") != "no":
14473                 portage.output.havecolor = 0
14474                 settings["NOCOLOR"] = "true"
14475                 settings.backup_changes("NOCOLOR")
14476
14477 def apply_priorities(settings):
14478         ionice(settings)
14479         nice(settings)
14480
14481 def nice(settings):
14482         try:
14483                 os.nice(int(settings.get("PORTAGE_NICENESS", "0")))
14484         except (OSError, ValueError), e:
14485                 out = portage.output.EOutput()
14486                 out.eerror("Failed to change nice value to '%s'" % \
14487                         settings["PORTAGE_NICENESS"])
14488                 out.eerror("%s\n" % str(e))
14489
14490 def ionice(settings):
14491
14492         ionice_cmd = settings.get("PORTAGE_IONICE_COMMAND")
14493         if ionice_cmd:
14494                 ionice_cmd = shlex.split(ionice_cmd)
14495         if not ionice_cmd:
14496                 return
14497
14498         from portage.util import varexpand
14499         variables = {"PID" : str(os.getpid())}
14500         cmd = [varexpand(x, mydict=variables) for x in ionice_cmd]
14501
14502         try:
14503                 rval = portage.process.spawn(cmd, env=os.environ)
14504         except portage.exception.CommandNotFound:
14505                 # The OS kernel probably doesn't support ionice,
14506                 # so return silently.
14507                 return
14508
14509         if rval != os.EX_OK:
14510                 out = portage.output.EOutput()
14511                 out.eerror("PORTAGE_IONICE_COMMAND returned %d" % (rval,))
14512                 out.eerror("See the make.conf(5) man page for PORTAGE_IONICE_COMMAND usage instructions.")
14513
14514 def display_missing_pkg_set(root_config, set_name):
14515
14516         msg = []
14517         msg.append(("emerge: There are no sets to satisfy '%s'. " + \
14518                 "The following sets exist:") % \
14519                 colorize("INFORM", set_name))
14520         msg.append("")
14521
14522         for s in sorted(root_config.sets):
14523                 msg.append("    %s" % s)
14524         msg.append("")
14525
14526         writemsg_level("".join("%s\n" % l for l in msg),
14527                 level=logging.ERROR, noiselevel=-1)
14528
14529 def expand_set_arguments(myfiles, myaction, root_config):
14530         retval = os.EX_OK
14531         setconfig = root_config.setconfig
14532
14533         sets = setconfig.getSets()
14534
14535         # In order to know exactly which atoms/sets should be added to the
14536         # world file, the depgraph performs set expansion later. It will get
14537         # confused about where the atoms came from if it's not allowed to
14538         # expand them itself.
14539         do_not_expand = (None, )
14540         newargs = []
14541         for a in myfiles:
14542                 if a in ("system", "world"):
14543                         newargs.append(SETPREFIX+a)
14544                 else:
14545                         newargs.append(a)
14546         myfiles = newargs
14547         del newargs
14548         newargs = []
14549
14550         # separators for set arguments
14551         ARG_START = "{"
14552         ARG_END = "}"
14553
14554         # WARNING: all operators must be of equal length
14555         IS_OPERATOR = "/@"
14556         DIFF_OPERATOR = "-@"
14557         UNION_OPERATOR = "+@"
14558         
14559         for i in range(0, len(myfiles)):
14560                 if myfiles[i].startswith(SETPREFIX):
14561                         start = 0
14562                         end = 0
14563                         x = myfiles[i][len(SETPREFIX):]
14564                         newset = ""
14565                         while x:
14566                                 start = x.find(ARG_START)
14567                                 end = x.find(ARG_END)
14568                                 if start > 0 and start < end:
14569                                         namepart = x[:start]
14570                                         argpart = x[start+1:end]
14571                                 
14572                                         # TODO: implement proper quoting
14573                                         args = argpart.split(",")
14574                                         options = {}
14575                                         for a in args:
14576                                                 if "=" in a:
14577                                                         k, v  = a.split("=", 1)
14578                                                         options[k] = v
14579                                                 else:
14580                                                         options[a] = "True"
14581                                         setconfig.update(namepart, options)
14582                                         newset += (x[:start-len(namepart)]+namepart)
14583                                         x = x[end+len(ARG_END):]
14584                                 else:
14585                                         newset += x
14586                                         x = ""
14587                         myfiles[i] = SETPREFIX+newset
14588                                 
14589         sets = setconfig.getSets()
14590
14591         # display errors that occured while loading the SetConfig instance
14592         for e in setconfig.errors:
14593                 print colorize("BAD", "Error during set creation: %s" % e)
14594         
14595         # emerge relies on the existance of sets with names "world" and "system"
14596         required_sets = ("world", "system")
14597         missing_sets = []
14598
14599         for s in required_sets:
14600                 if s not in sets:
14601                         missing_sets.append(s)
14602         if missing_sets:
14603                 if len(missing_sets) > 2:
14604                         missing_sets_str = ", ".join('"%s"' % s for s in missing_sets[:-1])
14605                         missing_sets_str += ', and "%s"' % missing_sets[-1]
14606                 elif len(missing_sets) == 2:
14607                         missing_sets_str = '"%s" and "%s"' % tuple(missing_sets)
14608                 else:
14609                         missing_sets_str = '"%s"' % missing_sets[-1]
14610                 msg = ["emerge: incomplete set configuration, " + \
14611                         "missing set(s): %s" % missing_sets_str]
14612                 if sets:
14613                         msg.append("        sets defined: %s" % ", ".join(sets))
14614                 msg.append("        This usually means that '%s'" % \
14615                         (os.path.join(portage.const.GLOBAL_CONFIG_PATH, "sets.conf"),))
14616                 msg.append("        is missing or corrupt.")
14617                 for line in msg:
14618                         writemsg_level(line + "\n", level=logging.ERROR, noiselevel=-1)
14619                 return (None, 1)
14620         unmerge_actions = ("unmerge", "prune", "clean", "depclean")
14621
14622         for a in myfiles:
14623                 if a.startswith(SETPREFIX):
14624                         # support simple set operations (intersection, difference and union)
14625                         # on the commandline. Expressions are evaluated strictly left-to-right
14626                         if IS_OPERATOR in a or DIFF_OPERATOR in a or UNION_OPERATOR in a:
14627                                 expression = a[len(SETPREFIX):]
14628                                 expr_sets = []
14629                                 expr_ops = []
14630                                 while IS_OPERATOR in expression or DIFF_OPERATOR in expression or UNION_OPERATOR in expression:
14631                                         is_pos = expression.rfind(IS_OPERATOR)
14632                                         diff_pos = expression.rfind(DIFF_OPERATOR)
14633                                         union_pos = expression.rfind(UNION_OPERATOR)
14634                                         op_pos = max(is_pos, diff_pos, union_pos)
14635                                         s1 = expression[:op_pos]
14636                                         s2 = expression[op_pos+len(IS_OPERATOR):]
14637                                         op = expression[op_pos:op_pos+len(IS_OPERATOR)]
14638                                         if not s2 in sets:
14639                                                 display_missing_pkg_set(root_config, s2)
14640                                                 return (None, 1)
14641                                         expr_sets.insert(0, s2)
14642                                         expr_ops.insert(0, op)
14643                                         expression = s1
14644                                 if not expression in sets:
14645                                         display_missing_pkg_set(root_config, expression)
14646                                         return (None, 1)
14647                                 expr_sets.insert(0, expression)
14648                                 result = set(setconfig.getSetAtoms(expression))
14649                                 for i in range(0, len(expr_ops)):
14650                                         s2 = setconfig.getSetAtoms(expr_sets[i+1])
14651                                         if expr_ops[i] == IS_OPERATOR:
14652                                                 result.intersection_update(s2)
14653                                         elif expr_ops[i] == DIFF_OPERATOR:
14654                                                 result.difference_update(s2)
14655                                         elif expr_ops[i] == UNION_OPERATOR:
14656                                                 result.update(s2)
14657                                         else:
14658                                                 raise NotImplementedError("unknown set operator %s" % expr_ops[i])
14659                                 newargs.extend(result)
14660                         else:                   
14661                                 s = a[len(SETPREFIX):]
14662                                 if s not in sets:
14663                                         display_missing_pkg_set(root_config, s)
14664                                         return (None, 1)
14665                                 setconfig.active.append(s)
14666                                 try:
14667                                         set_atoms = setconfig.getSetAtoms(s)
14668                                 except portage.exception.PackageSetNotFound, e:
14669                                         writemsg_level(("emerge: the given set '%s' " + \
14670                                                 "contains a non-existent set named '%s'.\n") % \
14671                                                 (s, e), level=logging.ERROR, noiselevel=-1)
14672                                         return (None, 1)
14673                                 if myaction in unmerge_actions and \
14674                                                 not sets[s].supportsOperation("unmerge"):
14675                                         sys.stderr.write("emerge: the given set '%s' does " % s + \
14676                                                 "not support unmerge operations\n")
14677                                         retval = 1
14678                                 elif not set_atoms:
14679                                         print "emerge: '%s' is an empty set" % s
14680                                 elif myaction not in do_not_expand:
14681                                         newargs.extend(set_atoms)
14682                                 else:
14683                                         newargs.append(SETPREFIX+s)
14684                                 for e in sets[s].errors:
14685                                         print e
14686                 else:
14687                         newargs.append(a)
14688         return (newargs, retval)
14689
14690 def repo_name_check(trees):
14691         missing_repo_names = set()
14692         for root, root_trees in trees.iteritems():
14693                 if "porttree" in root_trees:
14694                         portdb = root_trees["porttree"].dbapi
14695                         missing_repo_names.update(portdb.porttrees)
14696                         repos = portdb.getRepositories()
14697                         for r in repos:
14698                                 missing_repo_names.discard(portdb.getRepositoryPath(r))
14699                         if portdb.porttree_root in missing_repo_names and \
14700                                 not os.path.exists(os.path.join(
14701                                 portdb.porttree_root, "profiles")):
14702                                 # This is normal if $PORTDIR happens to be empty,
14703                                 # so don't warn about it.
14704                                 missing_repo_names.remove(portdb.porttree_root)
14705
14706         if missing_repo_names:
14707                 msg = []
14708                 msg.append("WARNING: One or more repositories " + \
14709                         "have missing repo_name entries:")
14710                 msg.append("")
14711                 for p in missing_repo_names:
14712                         msg.append("\t%s/profiles/repo_name" % (p,))
14713                 msg.append("")
14714                 msg.extend(textwrap.wrap("NOTE: Each repo_name entry " + \
14715                         "should be a plain text file containing a unique " + \
14716                         "name for the repository on the first line.", 70))
14717                 writemsg_level("".join("%s\n" % l for l in msg),
14718                         level=logging.WARNING, noiselevel=-1)
14719
14720         return bool(missing_repo_names)
14721
14722 def config_protect_check(trees):
14723         for root, root_trees in trees.iteritems():
14724                 if not root_trees["root_config"].settings.get("CONFIG_PROTECT"):
14725                         msg = "!!! CONFIG_PROTECT is empty"
14726                         if root != "/":
14727                                 msg += " for '%s'" % root
14728                         writemsg_level(msg, level=logging.WARN, noiselevel=-1)
14729
14730 def ambiguous_package_name(arg, atoms, root_config, spinner, myopts):
14731
14732         if "--quiet" in myopts:
14733                 print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
14734                 print "!!! one of the following fully-qualified ebuild names instead:\n"
14735                 for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
14736                         print "    " + colorize("INFORM", cp)
14737                 return
14738
14739         s = search(root_config, spinner, "--searchdesc" in myopts,
14740                 "--quiet" not in myopts, "--usepkg" in myopts,
14741                 "--usepkgonly" in myopts)
14742         null_cp = portage.dep_getkey(insert_category_into_atom(
14743                 arg, "null"))
14744         cat, atom_pn = portage.catsplit(null_cp)
14745         s.searchkey = atom_pn
14746         for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
14747                 s.addCP(cp)
14748         s.output()
14749         print "!!! The short ebuild name \"%s\" is ambiguous. Please specify" % arg
14750         print "!!! one of the above fully-qualified ebuild names instead.\n"
14751
14752 def profile_check(trees, myaction, myopts):
14753         if myaction in ("info", "sync"):
14754                 return os.EX_OK
14755         elif "--version" in myopts or "--help" in myopts:
14756                 return os.EX_OK
14757         for root, root_trees in trees.iteritems():
14758                 if root_trees["root_config"].settings.profiles:
14759                         continue
14760                 # generate some profile related warning messages
14761                 validate_ebuild_environment(trees)
14762                 msg = "If you have just changed your profile configuration, you " + \
14763                         "should revert back to the previous configuration. Due to " + \
14764                         "your current profile being invalid, allowed actions are " + \
14765                         "limited to --help, --info, --sync, and --version."
14766                 writemsg_level("".join("!!! %s\n" % l for l in textwrap.wrap(msg, 70)),
14767                         level=logging.ERROR, noiselevel=-1)
14768                 return 1
14769         return os.EX_OK
14770
14771 def emerge_main():
14772         global portage  # NFC why this is necessary now - genone
14773         portage._disable_legacy_globals()
14774         # Disable color until we're sure that it should be enabled (after
14775         # EMERGE_DEFAULT_OPTS has been parsed).
14776         portage.output.havecolor = 0
14777         # This first pass is just for options that need to be known as early as
14778         # possible, such as --config-root.  They will be parsed again later,
14779         # together with EMERGE_DEFAULT_OPTS (which may vary depending on the
14780         # the value of --config-root).
14781         myaction, myopts, myfiles = parse_opts(sys.argv[1:], silent=True)
14782         if "--debug" in myopts:
14783                 os.environ["PORTAGE_DEBUG"] = "1"
14784         if "--config-root" in myopts:
14785                 os.environ["PORTAGE_CONFIGROOT"] = myopts["--config-root"]
14786
14787         # Portage needs to ensure a sane umask for the files it creates.
14788         os.umask(022)
14789         settings, trees, mtimedb = load_emerge_config()
14790         portdb = trees[settings["ROOT"]]["porttree"].dbapi
14791         rval = profile_check(trees, myaction, myopts)
14792         if rval != os.EX_OK:
14793                 return rval
14794
14795         if portage._global_updates(trees, mtimedb["updates"]):
14796                 mtimedb.commit()
14797                 # Reload the whole config from scratch.
14798                 settings, trees, mtimedb = load_emerge_config(trees=trees)
14799                 portdb = trees[settings["ROOT"]]["porttree"].dbapi
14800
14801         xterm_titles = "notitles" not in settings.features
14802
14803         tmpcmdline = []
14804         if "--ignore-default-opts" not in myopts:
14805                 tmpcmdline.extend(settings["EMERGE_DEFAULT_OPTS"].split())
14806         tmpcmdline.extend(sys.argv[1:])
14807         myaction, myopts, myfiles = parse_opts(tmpcmdline)
14808
14809         if "--digest" in myopts:
14810                 os.environ["FEATURES"] = os.environ.get("FEATURES","") + " digest"
14811                 # Reload the whole config from scratch so that the portdbapi internal
14812                 # config is updated with new FEATURES.
14813                 settings, trees, mtimedb = load_emerge_config(trees=trees)
14814                 portdb = trees[settings["ROOT"]]["porttree"].dbapi
14815
14816         for myroot in trees:
14817                 mysettings =  trees[myroot]["vartree"].settings
14818                 mysettings.unlock()
14819                 adjust_config(myopts, mysettings)
14820                 mysettings["PORTAGE_COUNTER_HASH"] = \
14821                         trees[myroot]["vartree"].dbapi._counter_hash()
14822                 mysettings.backup_changes("PORTAGE_COUNTER_HASH")
14823                 mysettings.lock()
14824                 del myroot, mysettings
14825
14826         apply_priorities(settings)
14827
14828         spinner = stdout_spinner()
14829         if "candy" in settings.features:
14830                 spinner.update = spinner.update_scroll
14831
14832         if "--quiet" not in myopts:
14833                 portage.deprecated_profile_check(settings=settings)
14834                 repo_name_check(trees)
14835                 config_protect_check(trees)
14836
14837         eclasses_overridden = {}
14838         for mytrees in trees.itervalues():
14839                 mydb = mytrees["porttree"].dbapi
14840                 # Freeze the portdbapi for performance (memoize all xmatch results).
14841                 mydb.freeze()
14842                 eclasses_overridden.update(mydb.eclassdb._master_eclasses_overridden)
14843         del mytrees, mydb
14844
14845         if eclasses_overridden and \
14846                 settings.get("PORTAGE_ECLASS_WARNING_ENABLE") != "0":
14847                 prefix = bad(" * ")
14848                 if len(eclasses_overridden) == 1:
14849                         writemsg(prefix + "Overlay eclass overrides " + \
14850                                 "eclass from PORTDIR:\n", noiselevel=-1)
14851                 else:
14852                         writemsg(prefix + "Overlay eclasses override " + \
14853                                 "eclasses from PORTDIR:\n", noiselevel=-1)
14854                 writemsg(prefix + "\n", noiselevel=-1)
14855                 for eclass_name in sorted(eclasses_overridden):
14856                         writemsg(prefix + "  '%s/%s.eclass'\n" % \
14857                                 (eclasses_overridden[eclass_name], eclass_name),
14858                                 noiselevel=-1)
14859                 writemsg(prefix + "\n", noiselevel=-1)
14860                 msg = "It is best to avoid overriding eclasses from PORTDIR " + \
14861                 "because it will trigger invalidation of cached ebuild metadata " + \
14862                 "that is distributed with the portage tree. If you must " + \
14863                 "override eclasses from PORTDIR then you are advised to add " + \
14864                 "FEATURES=\"metadata-transfer\" to /etc/make.conf and to run " + \
14865                 "`emerge --regen` after each time that you run `emerge --sync`. " + \
14866                 "Set PORTAGE_ECLASS_WARNING_ENABLE=\"0\" in /etc/make.conf if " + \
14867                 "you would like to disable this warning."
14868                 from textwrap import wrap
14869                 for line in wrap(msg, 72):
14870                         writemsg("%s%s\n" % (prefix, line), noiselevel=-1)
14871
14872         if "moo" in myfiles:
14873                 print """
14874
14875   Larry loves Gentoo (""" + platform.system() + """)
14876
14877  _______________________
14878 < Have you mooed today? >
14879  -----------------------
14880         \   ^__^
14881          \  (oo)\_______
14882             (__)\       )\/\ 
14883                 ||----w |
14884                 ||     ||
14885
14886 """
14887
14888         for x in myfiles:
14889                 ext = os.path.splitext(x)[1]
14890                 if (ext == ".ebuild" or ext == ".tbz2") and os.path.exists(os.path.abspath(x)):
14891                         print colorize("BAD", "\n*** emerging by path is broken and may not always work!!!\n")
14892                         break
14893
14894         root_config = trees[settings["ROOT"]]["root_config"]
14895         if myaction == "list-sets":
14896                 sys.stdout.write("".join("%s\n" % s for s in sorted(root_config.sets)))
14897                 sys.stdout.flush()
14898                 return os.EX_OK
14899
14900         # only expand sets for actions taking package arguments
14901         oldargs = myfiles[:]
14902         if myaction in ("clean", "config", "depclean", "info", "prune", "unmerge", None):
14903                 myfiles, retval = expand_set_arguments(myfiles, myaction, root_config)
14904                 if retval != os.EX_OK:
14905                         return retval
14906
14907                 # Need to handle empty sets specially, otherwise emerge will react 
14908                 # with the help message for empty argument lists
14909                 if oldargs and not myfiles:
14910                         print "emerge: no targets left after set expansion"
14911                         return 0
14912
14913         if ("--tree" in myopts) and ("--columns" in myopts):
14914                 print "emerge: can't specify both of \"--tree\" and \"--columns\"."
14915                 return 1
14916
14917         if ("--quiet" in myopts):
14918                 spinner.update = spinner.update_quiet
14919                 portage.util.noiselimit = -1
14920
14921         # Always create packages if FEATURES=buildpkg
14922         # Imply --buildpkg if --buildpkgonly
14923         if ("buildpkg" in settings.features) or ("--buildpkgonly" in myopts):
14924                 if "--buildpkg" not in myopts:
14925                         myopts["--buildpkg"] = True
14926
14927         # Also allow -S to invoke search action (-sS)
14928         if ("--searchdesc" in myopts):
14929                 if myaction and myaction != "search":
14930                         myfiles.append(myaction)
14931                 if "--search" not in myopts:
14932                         myopts["--search"] = True
14933                 myaction = "search"
14934
14935         # Always try and fetch binary packages if FEATURES=getbinpkg
14936         if ("getbinpkg" in settings.features):
14937                 myopts["--getbinpkg"] = True
14938
14939         if "--buildpkgonly" in myopts:
14940                 # --buildpkgonly will not merge anything, so
14941                 # it cancels all binary package options.
14942                 for opt in ("--getbinpkg", "--getbinpkgonly",
14943                         "--usepkg", "--usepkgonly"):
14944                         myopts.pop(opt, None)
14945
14946         if "--fetch-all-uri" in myopts:
14947                 myopts["--fetchonly"] = True
14948
14949         if "--skipfirst" in myopts and "--resume" not in myopts:
14950                 myopts["--resume"] = True
14951
14952         if ("--getbinpkgonly" in myopts) and not ("--usepkgonly" in myopts):
14953                 myopts["--usepkgonly"] = True
14954
14955         if ("--getbinpkgonly" in myopts) and not ("--getbinpkg" in myopts):
14956                 myopts["--getbinpkg"] = True
14957
14958         if ("--getbinpkg" in myopts) and not ("--usepkg" in myopts):
14959                 myopts["--usepkg"] = True
14960
14961         # Also allow -K to apply --usepkg/-k
14962         if ("--usepkgonly" in myopts) and not ("--usepkg" in myopts):
14963                 myopts["--usepkg"] = True
14964
14965         # Allow -p to remove --ask
14966         if ("--pretend" in myopts) and ("--ask" in myopts):
14967                 print ">>> --pretend disables --ask... removing --ask from options."
14968                 del myopts["--ask"]
14969
14970         # forbid --ask when not in a terminal
14971         # note: this breaks `emerge --ask | tee logfile`, but that doesn't work anyway.
14972         if ("--ask" in myopts) and (not sys.stdin.isatty()):
14973                 portage.writemsg("!!! \"--ask\" should only be used in a terminal. Exiting.\n",
14974                         noiselevel=-1)
14975                 return 1
14976
14977         if settings.get("PORTAGE_DEBUG", "") == "1":
14978                 spinner.update = spinner.update_quiet
14979                 portage.debug=1
14980                 if "python-trace" in settings.features:
14981                         import portage.debug
14982                         portage.debug.set_trace(True)
14983
14984         if not ("--quiet" in myopts):
14985                 if not sys.stdout.isatty() or ("--nospinner" in myopts):
14986                         spinner.update = spinner.update_basic
14987
14988         if "--version" in myopts:
14989                 print getportageversion(settings["PORTDIR"], settings["ROOT"],
14990                         settings.profile_path, settings["CHOST"],
14991                         trees[settings["ROOT"]]["vartree"].dbapi)
14992                 return 0
14993         elif "--help" in myopts:
14994                 _emerge.help.help(myaction, myopts, portage.output.havecolor)
14995                 return 0
14996
14997         if "--debug" in myopts:
14998                 print "myaction", myaction
14999                 print "myopts", myopts
15000
15001         if not myaction and not myfiles and "--resume" not in myopts:
15002                 _emerge.help.help(myaction, myopts, portage.output.havecolor)
15003                 return 1
15004
15005         pretend = "--pretend" in myopts
15006         fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
15007         buildpkgonly = "--buildpkgonly" in myopts
15008
15009         # check if root user is the current user for the actions where emerge needs this
15010         if portage.secpass < 2:
15011                 # We've already allowed "--version" and "--help" above.
15012                 if "--pretend" not in myopts and myaction not in ("search","info"):
15013                         need_superuser = not \
15014                                 (fetchonly or \
15015                                 (buildpkgonly and secpass >= 1) or \
15016                                 myaction in ("metadata", "regen") or \
15017                                 (myaction == "sync" and os.access(settings["PORTDIR"], os.W_OK)))
15018                         if portage.secpass < 1 or \
15019                                 need_superuser:
15020                                 if need_superuser:
15021                                         access_desc = "superuser"
15022                                 else:
15023                                         access_desc = "portage group"
15024                                 # Always show portage_group_warning() when only portage group
15025                                 # access is required but the user is not in the portage group.
15026                                 from portage.data import portage_group_warning
15027                                 if "--ask" in myopts:
15028                                         myopts["--pretend"] = True
15029                                         del myopts["--ask"]
15030                                         print ("%s access is required... " + \
15031                                                 "adding --pretend to options.\n") % access_desc
15032                                         if portage.secpass < 1 and not need_superuser:
15033                                                 portage_group_warning()
15034                                 else:
15035                                         sys.stderr.write(("emerge: %s access is " + \
15036                                                 "required.\n\n") % access_desc)
15037                                         if portage.secpass < 1 and not need_superuser:
15038                                                 portage_group_warning()
15039                                         return 1
15040
15041         disable_emergelog = False
15042         for x in ("--pretend", "--fetchonly", "--fetch-all-uri"):
15043                 if x in myopts:
15044                         disable_emergelog = True
15045                         break
15046         if myaction in ("search", "info"):
15047                 disable_emergelog = True
15048         if disable_emergelog:
15049                 """ Disable emergelog for everything except build or unmerge
15050                 operations.  This helps minimize parallel emerge.log entries that can
15051                 confuse log parsers.  We especially want it disabled during
15052                 parallel-fetch, which uses --resume --fetchonly."""
15053                 global emergelog
15054                 def emergelog(*pargs, **kargs):
15055                         pass
15056
15057         if not "--pretend" in myopts:
15058                 emergelog(xterm_titles, "Started emerge on: "+\
15059                         time.strftime("%b %d, %Y %H:%M:%S", time.localtime()))
15060                 myelogstr=""
15061                 if myopts:
15062                         myelogstr=" ".join(myopts)
15063                 if myaction:
15064                         myelogstr+=" "+myaction
15065                 if myfiles:
15066                         myelogstr += " " + " ".join(oldargs)
15067                 emergelog(xterm_titles, " *** emerge " + myelogstr)
15068         del oldargs
15069
15070         def emergeexitsig(signum, frame):
15071                 signal.signal(signal.SIGINT, signal.SIG_IGN)
15072                 signal.signal(signal.SIGTERM, signal.SIG_IGN)
15073                 portage.util.writemsg("\n\nExiting on signal %(signal)s\n" % {"signal":signum})
15074                 sys.exit(100+signum)
15075         signal.signal(signal.SIGINT, emergeexitsig)
15076         signal.signal(signal.SIGTERM, emergeexitsig)
15077
15078         def emergeexit():
15079                 """This gets out final log message in before we quit."""
15080                 if "--pretend" not in myopts:
15081                         emergelog(xterm_titles, " *** terminating.")
15082                 if "notitles" not in settings.features:
15083                         xtermTitleReset()
15084         portage.atexit_register(emergeexit)
15085
15086         if myaction in ("config", "metadata", "regen", "sync"):
15087                 if "--pretend" in myopts:
15088                         sys.stderr.write(("emerge: The '%s' action does " + \
15089                                 "not support '--pretend'.\n") % myaction)
15090                         return 1
15091
15092         if "sync" == myaction:
15093                 return action_sync(settings, trees, mtimedb, myopts, myaction)
15094         elif "metadata" == myaction:
15095                 action_metadata(settings, portdb, myopts)
15096         elif myaction=="regen":
15097                 validate_ebuild_environment(trees)
15098                 action_regen(settings, portdb, myopts.get("--jobs"),
15099                         myopts.get("--load-average"))
15100         # HELP action
15101         elif "config"==myaction:
15102                 validate_ebuild_environment(trees)
15103                 action_config(settings, trees, myopts, myfiles)
15104
15105         # SEARCH action
15106         elif "search"==myaction:
15107                 validate_ebuild_environment(trees)
15108                 action_search(trees[settings["ROOT"]]["root_config"],
15109                         myopts, myfiles, spinner)
15110         elif myaction in ("clean", "unmerge") or \
15111                 (myaction == "prune" and "--nodeps" in myopts):
15112                 validate_ebuild_environment(trees)
15113
15114                 # Ensure atoms are valid before calling unmerge().
15115                 # For backward compat, leading '=' is not required.
15116                 for x in myfiles:
15117                         if is_valid_package_atom(x) or \
15118                                 is_valid_package_atom("=" + x):
15119                                 continue
15120                         msg = []
15121                         msg.append("'%s' is not a valid package atom." % (x,))
15122                         msg.append("Please check ebuild(5) for full details.")
15123                         writemsg_level("".join("!!! %s\n" % line for line in msg),
15124                                 level=logging.ERROR, noiselevel=-1)
15125                         return 1
15126
15127                 # When given a list of atoms, unmerge
15128                 # them in the order given.
15129                 ordered = myaction == "unmerge"
15130                 if 1 == unmerge(root_config, myopts, myaction, myfiles,
15131                         mtimedb["ldpath"], ordered=ordered):
15132                         if not (buildpkgonly or fetchonly or pretend):
15133                                 post_emerge(root_config, myopts, mtimedb, os.EX_OK)
15134
15135         elif myaction in ("depclean", "info", "prune"):
15136
15137                 # Ensure atoms are valid before calling unmerge().
15138                 vardb = trees[settings["ROOT"]]["vartree"].dbapi
15139                 valid_atoms = []
15140                 for x in myfiles:
15141                         if is_valid_package_atom(x):
15142                                 try:
15143                                         valid_atoms.append(
15144                                                 portage.dep_expand(x, mydb=vardb, settings=settings))
15145                                 except portage.exception.AmbiguousPackageName, e:
15146                                         msg = "The short ebuild name \"" + x + \
15147                                                 "\" is ambiguous.  Please specify " + \
15148                                                 "one of the following " + \
15149                                                 "fully-qualified ebuild names instead:"
15150                                         for line in textwrap.wrap(msg, 70):
15151                                                 writemsg_level("!!! %s\n" % (line,),
15152                                                         level=logging.ERROR, noiselevel=-1)
15153                                         for i in e[0]:
15154                                                 writemsg_level("    %s\n" % colorize("INFORM", i),
15155                                                         level=logging.ERROR, noiselevel=-1)
15156                                         writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
15157                                         return 1
15158                                 continue
15159                         msg = []
15160                         msg.append("'%s' is not a valid package atom." % (x,))
15161                         msg.append("Please check ebuild(5) for full details.")
15162                         writemsg_level("".join("!!! %s\n" % line for line in msg),
15163                                 level=logging.ERROR, noiselevel=-1)
15164                         return 1
15165
15166                 if myaction == "info":
15167                         return action_info(settings, trees, myopts, valid_atoms)
15168
15169                 validate_ebuild_environment(trees)
15170                 action_depclean(settings, trees, mtimedb["ldpath"],
15171                         myopts, myaction, valid_atoms, spinner)
15172                 if not (buildpkgonly or fetchonly or pretend):
15173                         post_emerge(root_config, myopts, mtimedb, os.EX_OK)
15174         # "update", "system", or just process files:
15175         else:
15176                 validate_ebuild_environment(trees)
15177                 if "--pretend" not in myopts:
15178                         display_news_notification(root_config, myopts)
15179                 retval = action_build(settings, trees, mtimedb,
15180                         myopts, myaction, myfiles, spinner)
15181                 root_config = trees[settings["ROOT"]]["root_config"]
15182                 post_emerge(root_config, myopts, mtimedb, retval)
15183
15184                 return retval